Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      79
-rw-r--r--  lib/bitmap.c            3
-rw-r--r--  lib/bug.c               6
-rw-r--r--  lib/div64.c            52
-rw-r--r--  lib/dma-debug.c         1
-rw-r--r--  lib/dynamic_debug.c   140
-rw-r--r--  lib/idr.c              78
-rw-r--r--  lib/kobject.c          39
-rw-r--r--  lib/list_sort.c       174
-rw-r--r--  lib/parser.c            7
-rw-r--r--  lib/percpu_counter.c   55
-rw-r--r--  lib/radix-tree.c        2
-rw-r--r--  lib/swiotlb.c          18
-rw-r--r--  lib/vsprintf.c         19
14 files changed, 480 insertions, 193 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0deaf81ac11..28b42b9274d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -317,6 +317,14 @@ config DEBUG_OBJECTS_RCU_HEAD
help
Enable this to turn on debugging of RCU list heads (call_rcu() usage).
+config DEBUG_OBJECTS_PERCPU_COUNTER
+ bool "Debug percpu counter objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ percpu counter routines to track the lifetime of percpu counter
+ objects and validate the percpu counter operations.
+
config DEBUG_OBJECTS_ENABLE_DEFAULT
int "debug_objects bootup default value (0-1)"
range 0 1
@@ -353,7 +361,7 @@ config SLUB_DEBUG_ON
config SLUB_STATS
default n
bool "Enable SLUB performance statistics"
- depends on SLUB && SLUB_DEBUG && SYSFS
+ depends on SLUB && SYSFS
help
SLUB statistics are useful to debug SLUB's allocation behavior in
order to find ways to optimize the allocator. This should never be
@@ -366,7 +374,7 @@ config SLUB_STATS
config DEBUG_KMEMLEAK
bool "Kernel memory leak detector"
depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
- (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+ (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
select DEBUG_FS if SYSFS
select STACKTRACE if STACKTRACE_SUPPORT
@@ -461,6 +469,15 @@ config DEBUG_MUTEXES
This feature allows mutex semantics violations to be detected and
reported.
+config BKL
+ bool "Big Kernel Lock" if (SMP || PREEMPT)
+ default y
+ help
+ This is the traditional lock that is used in old code instead
+ of proper locking. All drivers that use the BKL should depend
+ on this symbol.
+ Say Y here unless you are working on removing the BKL.
+
config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -482,6 +499,7 @@ config PROVE_LOCKING
select DEBUG_SPINLOCK
select DEBUG_MUTEXES
select DEBUG_LOCK_ALLOC
+ select TRACE_IRQFLAGS
default n
help
This feature enables the kernel to prove that all locking
@@ -539,6 +557,23 @@ config PROVE_RCU_REPEATEDLY
disabling, allowing multiple RCU-lockdep warnings to be printed
on a single reboot.
+ Say Y to allow multiple RCU-lockdep warnings per boot.
+
+ Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+ bool "RCU debugging: sparse-based checks for pointer usage"
+ default n
+ help
+ This feature enables the __rcu sparse annotation for
+ RCU-protected pointers. This annotation will cause sparse
+ to flag any non-RCU use of annotated pointers. This can be
+ helpful when debugging RCU usage. Please note that this feature
+ is not intended to enforce code cleanliness; it is instead merely
+ a debugging aid.
+
+ Say Y to make sparse flag questionable use of RCU-protected pointers.
+
Say N if you are unsure.
config LOCKDEP
@@ -579,11 +614,10 @@ config DEBUG_LOCKDEP
of more runtime overhead.
config TRACE_IRQFLAGS
- depends on DEBUG_KERNEL
bool
- default y
- depends on TRACE_IRQFLAGS_SUPPORT
- depends on PROVE_LOCKING
+ help
+ Enables hooks to interrupt enabling and disabling for
+ either tracing or lock debugging.
config DEBUG_SPINLOCK_SLEEP
bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -714,6 +748,15 @@ config DEBUG_LIST
If unsure, say N.
+config TEST_LIST_SORT
+ bool "Linked list sorting test"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on the 'list_sort()' function test. The test is
+ executed only once during system boot, so it affects only boot time.
+
+ If unsure, say N.
+
config DEBUG_SG
bool "Debug SG table operations"
depends on DEBUG_KERNEL
@@ -832,6 +875,30 @@ config RCU_CPU_STALL_DETECTOR
Say Y if you are unsure.
+config RCU_CPU_STALL_TIMEOUT
+ int "RCU CPU stall timeout in seconds"
+ depends on RCU_CPU_STALL_DETECTOR
+ range 3 300
+ default 60
+ help
+ If a given RCU grace period extends for more than the specified
+ number of seconds, a CPU stall warning is printed. If the
+ RCU grace period persists, additional CPU stall warnings are
+ printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+ bool "RCU CPU stall checking starts automatically at boot"
+ depends on RCU_CPU_STALL_DETECTOR
+ default y
+ help
+ If set, start checking for RCU CPU stalls immediately on
+ boot. Otherwise, RCU CPU stall checking must be manually
+ enabled.
+
+ Say Y if you are unsure.
+
+ Say N if you wish to suppress RCU CPU stall checking during boot.
+
config RCU_CPU_STALL_VERBOSE
bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c916cc..741fae905ae 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -359,7 +359,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
#define CHUNKSZ 32
#define nbits_to_hold_value(val) fls(val)
-#define unhex(c) (isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
#define BASEDEC 10 /* fancier cpuset lists input in decimal */
/**
@@ -466,7 +465,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
return -EOVERFLOW;
- chunk = (chunk << 4) | unhex(c);
+ chunk = (chunk << 4) | hex_to_bin(c);
ndigits++; totaldigits++;
}
if (ndigits == 0)
diff --git a/lib/bug.c b/lib/bug.c
index 7cdfad88128..19552096d16 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
return NULL;
}
-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
{
char *secstrings;
unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&mod->bug_list, &module_bug_list);
-
- return 0;
}
void module_bug_cleanup(struct module *mod)
diff --git a/lib/div64.c b/lib/div64.c
index a111eb8de9c..5b491919177 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
EXPORT_SYMBOL(div_s64_rem);
#endif
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'. The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
- u32 high, d;
+ u32 high = divisor >> 32;
+ u64 quot;
- high = divisor >> 32;
- if (high) {
- unsigned int shift = fls(high);
+ if (high == 0) {
+ quot = div_u64(dividend, divisor);
+ } else {
+ int n = 1 + fls(high);
+ quot = div_u64(dividend >> n, divisor >> n);
- d = divisor >> shift;
- dividend >>= shift;
- } else
- d = divisor;
+ if (quot != 0)
+ quot--;
+ if ((dividend - quot * divisor) >= divisor)
+ quot++;
+ }
- return div_u64(dividend, d);
+ return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+ s64 quot, t;
+
+ quot = div64_u64(abs64(dividend), abs64(divisor));
+ t = (dividend ^ divisor) >> 63;
+
+ return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
#endif /* BITS_PER_LONG == 32 */
/*
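The correction step is the heart of the Hacker's Delight scheme: after shifting both operands right by n bits so the divisor fits in 32 bits, the 64-by-32 estimate can be off by one, so the code backs the estimate off and adds one back only if the remainder still covers the divisor. A minimal userspace sketch of the same idea, assuming a GCC-style __builtin_clz() and using native '/' where the kernel would call div_u64():

#include <stdint.h>
#include <stdio.h>

static uint64_t sketch_div64_u64(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;	/* div_u64() in the kernel */
	} else {
		/* Normalize so the divisor fits in 32 bits; the kernel
		 * only has a fast 64-by-32 divide, hence the dance. */
		int n = 33 - __builtin_clz(high);	/* 1 + fls(high) */

		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;			/* estimate may be one too big */
		if (dividend - quot * divisor >= divisor)
			quot++;			/* restore it if we overshot down */
	}
	return quot;
}

int main(void)
{
	uint64_t a = 0x0123456789abcdefULL, b = 0x100000001ULL;

	/* Spot-check against the compiler's native 64-bit division. */
	printf("%d\n", sketch_div64_u64(a, b) == a / b);
	return 0;
}

div64_s64() then reduces the signed case to this routine: t is 0 or all ones depending on whether the operand signs differ, and (quot ^ t) - t conditionally negates the unsigned quotient in two's complement.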
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 01e64270e24..4bfb0471f10 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -590,6 +590,7 @@ out_unlock:
static const struct file_operations filter_fops = {
.read = filter_read,
.write = filter_write,
+ .llseek = default_llseek,
};
static int dma_debug_fs_init(void)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc253372..3094318bfea 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
#include <linux/dynamic_debug.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
extern struct _ddebug __start___verbose[];
extern struct _ddebug __stop___verbose[];
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
struct ddebug_table {
struct list_head link;
char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
}
/*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
- struct ddebug_table *dt;
- char table_hash_value;
-
- list_for_each_entry(dt, &ddebug_tables, link) {
- if (first_table)
- table_hash_value = dt->ddebugs->primary_hash;
- else
- table_hash_value = dt->ddebugs->secondary_hash;
- if (dt->num_enabled && (hash == table_hash_value))
- return 0;
- }
- return 1;
-}
-
-/*
* Search the tables for _ddebug's which match the given
* `query' and apply the `flags' and `mask' to them. Tells
* the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
dt->num_enabled++;
dp->flags = newflags;
if (newflags) {
- dynamic_debug_enabled |=
- (1LL << dp->primary_hash);
- dynamic_debug_enabled2 |=
- (1LL << dp->secondary_hash);
+ jump_label_enable(&dp->enabled);
} else {
- if (disabled_hash(dp->primary_hash, true))
- dynamic_debug_enabled &=
- ~(1LL << dp->primary_hash);
- if (disabled_hash(dp->secondary_hash, false))
- dynamic_debug_enabled2 &=
- ~(1LL << dp->secondary_hash);
+ jump_label_disable(&dp->enabled);
}
if (verbose)
printk(KERN_INFO
@@ -429,6 +393,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
return 0;
}
+static int ddebug_exec_query(char *query_string)
+{
+ unsigned int flags = 0, mask = 0;
+ struct ddebug_query query;
+#define MAXWORDS 9
+ int nwords;
+ char *words[MAXWORDS];
+
+ nwords = ddebug_tokenize(query_string, words, MAXWORDS);
+ if (nwords <= 0)
+ return -EINVAL;
+ if (ddebug_parse_query(words, nwords-1, &query))
+ return -EINVAL;
+ if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+ return -EINVAL;
+
+ /* actually go and implement the change */
+ ddebug_change(&query, flags, mask);
+ return 0;
+}
+
+static __initdata char ddebug_setup_string[1024];
+static __init int ddebug_setup_query(char *str)
+{
+ if (strlen(str) >= 1024) {
+ pr_warning("ddebug boot param string too large\n");
+ return 0;
+ }
+ strcpy(ddebug_setup_string, str);
+ return 1;
+}
+
+__setup("ddebug_query=", ddebug_setup_query);
+
/*
* File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the
* command text from userspace, parses and executes it.
@@ -436,12 +434,8 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
- unsigned int flags = 0, mask = 0;
- struct ddebug_query query;
-#define MAXWORDS 9
- int nwords;
- char *words[MAXWORDS];
char tmpbuf[256];
+ int ret;
if (len == 0)
return 0;
@@ -455,16 +449,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
printk(KERN_INFO "%s: read %d bytes from userspace\n",
__func__, (int)len);
- nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
- if (nwords <= 0)
- return -EINVAL;
- if (ddebug_parse_query(words, nwords-1, &query))
- return -EINVAL;
- if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
- return -EINVAL;
-
- /* actually go and implement the change */
- ddebug_change(&query, flags, mask);
+ ret = ddebug_exec_query(tmpbuf);
+ if (ret)
+ return ret;
*offp += len;
return len;
@@ -725,13 +712,14 @@ static void ddebug_remove_all_tables(void)
mutex_unlock(&ddebug_lock);
}
-static int __init dynamic_debug_init(void)
+static __initdata int ddebug_init_success;
+
+static int __init dynamic_debug_init_debugfs(void)
{
struct dentry *dir, *file;
- struct _ddebug *iter, *iter_start;
- const char *modname = NULL;
- int ret = 0;
- int n = 0;
+
+ if (!ddebug_init_success)
+ return -ENODEV;
dir = debugfs_create_dir("dynamic_debug", NULL);
if (!dir)
@@ -742,6 +730,16 @@ static int __init dynamic_debug_init(void)
debugfs_remove(dir);
return -ENOMEM;
}
+ return 0;
+}
+
+static int __init dynamic_debug_init(void)
+{
+ struct _ddebug *iter, *iter_start;
+ const char *modname = NULL;
+ int ret = 0;
+ int n = 0;
+
if (__start___verbose != __stop___verbose) {
iter = __start___verbose;
modname = iter->modname;
@@ -759,12 +757,26 @@ static int __init dynamic_debug_init(void)
}
ret = ddebug_add_module(iter_start, n, modname);
}
+
+ /* ddebug_query boot param got passed -> set it up */
+ if (ddebug_setup_string[0] != '\0') {
+ ret = ddebug_exec_query(ddebug_setup_string);
+ if (ret)
+ pr_warning("Invalid ddebug boot param %s",
+ ddebug_setup_string);
+ else
+ pr_info("ddebug initialized with string %s",
+ ddebug_setup_string);
+ }
+
out_free:
- if (ret) {
+ if (ret)
ddebug_remove_all_tables();
- debugfs_remove(dir);
- debugfs_remove(file);
- }
+ else
+ ddebug_init_success = 1;
return 0;
}
-module_init(dynamic_debug_init);
+/* Allow early initialization for boot messages via boot param */
+arch_initcall(dynamic_debug_init);
+/* Debugfs setup must be done later */
+module_init(dynamic_debug_init_debugfs);
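With both entry points funneled through ddebug_exec_query(), the boot parameter and the debugfs control file accept the same query grammar. Illustrative usage only; the source files queried here are arbitrary examples:

# at run time, through debugfs
echo 'file svcsock.c line 1603 +p' > /sys/kernel/debug/dynamic_debug/control

# on the kernel command line, before debugfs exists
ddebug_query="file ec.c +p"

The arch_initcall()/module_init() split above exists precisely so a command-line query can take effect early, while the debugfs file is registered later once the filesystem is available.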
diff --git a/lib/idr.c b/lib/idr.c
index 7f1a4f0acf5..e15502e8b21 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id)
}
/**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible. This of course requires that
+ * no spinning locks be held.
*
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
*/
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
@@ -284,17 +285,19 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
- * @start_id: id to start search at
+ * @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* This is the allocate id function. It should be called with any
* required locks.
*
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
+ *
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
*
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * @id returns a value in the range @starting_id ... %0x7fffffff
*/
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
@@ -318,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above);
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
- * This is the allocate id function. It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
*
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
*
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
*/
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
@@ -388,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
}
/**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
* @idp: idr handle
* @id: unique key
*/
@@ -437,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
* function will remove all id mappings and leave all idp_layers
* unused.
*
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
* use idr_for_each() to free all objects, if necessary, then
* idr_remove_all() to remove all ids, and idr_destroy() to free
* up the cached idr_layers.
@@ -479,7 +481,7 @@ EXPORT_SYMBOL(idr_remove_all);
/**
* idr_destroy - release all cached layers within an idr tree
- * idp: idr handle
+ * @idp: idr handle
*/
void idr_destroy(struct idr *idp)
{
@@ -542,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
* not allowed.
*
* We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
*
* The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
*/
@@ -586,10 +588,11 @@ EXPORT_SYMBOL(idr_for_each);
/**
* idr_get_next - lookup next object of id to given id.
* @idp: idr handle
- * @id: pointer to lookup key
+ * @nextidp: pointer to lookup key
*
* Returns pointer to registered object with id, which is next number to
- * given id.
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
*/
void *idr_get_next(struct idr *idp, int *nextidp)
@@ -636,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next);
* @id: lookup key
*
* Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
*
* The caller must serialize with writers.
*/
@@ -695,10 +698,11 @@ void idr_init(struct idr *idp)
EXPORT_SYMBOL(idr_init);
-/*
+/**
+ * DOC: IDA description
* IDA - IDR based ID allocator
*
- * this is id allocator without id -> pointer translation. Memory
+ * This is an id allocator without id -> pointer translation. Memory
* usage is much lower than full blown idr because each id only
* occupies a bit. ida uses a custom leaf node which contains
* IDA_BITMAP_BITS slots.
@@ -731,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
* following function. It preallocates enough memory to satisfy the
* worst possible allocation.
*
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
*/
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
@@ -758,17 +762,17 @@ EXPORT_SYMBOL(ida_pre_get);
/**
* ida_get_new_above - allocate new ID above or equal to a start id
* @ida: ida handle
- * @staring_id: id to start search at
+ * @starting_id: id to start search at
* @p_id: pointer to the allocated handle
*
* Allocate new ID above or equal to @starting_id. It should be called with
* any required locks.
*
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the ida_pre_get() call. If the ida is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
*
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
*/
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
@@ -850,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
*
* Allocate new ID. It should be called with any required locks.
*
- * If memory is required, it will return -EAGAIN, you should unlock
+ * If memory is required, it will return %-EAGAIN, you should unlock
* and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * return %-ENOSPC.
*
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @id returns a value in the range %0 ... %0x7fffffff.
*/
int ida_get_new(struct ida *ida, int *p_id)
{
@@ -912,7 +916,7 @@ EXPORT_SYMBOL(ida_remove);
/**
* ida_destroy - release all cached layers within an ida tree
- * ida: ida handle
+ * @ida: ida handle
*/
void ida_destroy(struct ida *ida)
{
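The rewritten comments describe a retry protocol rather than a single call. A sketch of the canonical caller, with hypothetical my_idr/my_lock and error handling trimmed:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

int store_obj(void *obj)
{
	int id, ret;

again:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;		/* really out of memory */

	spin_lock(&my_lock);
	ret = idr_get_new(&my_idr, obj, &id);
	spin_unlock(&my_lock);

	if (ret == -EAGAIN)
		goto again;		/* freelist ran dry: refill, retry */
	if (ret)
		return ret;		/* -ENOSPC: idr is full */
	return id;
}

Note that idr_pre_get() deliberately runs outside the spinlock; that is what the new wording about passing GFP_KERNEL and holding no spinning locks is getting at.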
diff --git a/lib/kobject.c b/lib/kobject.c
index f07c57252e8..82dc34c095c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,17 +746,56 @@ void kset_unregister(struct kset *k)
*/
struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
+ return kset_find_obj_hinted(kset, name, NULL);
+}
+
+/**
+ * kset_find_obj_hinted - search for object in kset given a predecessor hint.
+ * @kset: kset we're looking in.
+ * @name: object's name.
+ * @hint: hint to possible object's predecessor.
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to the behavior of kset_find_obj(). Either way
+ * a reference for the returned object is held and the reference on the
+ * hinted object is released.
+ */
+struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+ struct kobject *hint)
+{
struct kobject *k;
struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
+
+ if (!hint)
+ goto slow_search;
+
+ /* end of list detection */
+ if (hint->entry.next == kset->list.next)
+ goto slow_search;
+
+ k = container_of(hint->entry.next, struct kobject, entry);
+ if (!kobject_name(k) || strcmp(kobject_name(k), name))
+ goto slow_search;
+
+ ret = kobject_get(k);
+ goto unlock_exit;
+
+slow_search:
list_for_each_entry(k, &kset->list, entry) {
if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get(k);
break;
}
}
+
+unlock_exit:
spin_unlock(&kset->list_lock);
+
+ if (hint)
+ kobject_put(hint);
+
return ret;
}
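The hint pays off when objects are looked up in roughly the order they were added, since each kobject's list neighbor is then the likely next match. A hypothetical walker over names dev0, dev1, ... (my_kset and the naming scheme are illustrative):

#include <linux/kobject.h>

static void walk_devs(struct kset *my_kset, int n)
{
	struct kobject *prev = NULL, *k = NULL;
	char name[16];
	int i;

	for (i = 0; i < n; i++) {
		snprintf(name, sizeof(name), "dev%d", i);
		/* consumes the reference on 'prev'; 'k' comes back referenced */
		k = kset_find_obj_hinted(my_kset, name, prev);
		if (!k)
			break;
		/* ... use k ... */
		prev = k;
	}
	if (k)
		kobject_put(k);		/* drop the final reference */
}

Because the helper always releases the hint's reference, the caller only has to drop the reference on the last object it was handed.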
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb794c38..d7325c6b103 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
* element comparison is needed, so the client's cmp()
* routine can invoke cond_resched() periodically.
*/
- (*cmp)(priv, tail, tail);
+ (*cmp)(priv, tail->next, tail->next);
tail->next->prev = tail;
tail = tail->next;
@@ -141,77 +141,151 @@ void list_sort(void *priv, struct list_head *head,
}
EXPORT_SYMBOL(list_sort);
-#ifdef DEBUG_LIST_SORT
+#ifdef CONFIG_TEST_LIST_SORT
+
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
struct debug_el {
- struct list_head l_h;
+ unsigned int poison1;
+ struct list_head list;
+ unsigned int poison2;
int value;
unsigned serial;
};
-static int cmp(void *priv, struct list_head *a, struct list_head *b)
+/* Array containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
{
- return container_of(a, struct debug_el, l_h)->value
- - container_of(b, struct debug_el, l_h)->value;
+ if (ela->serial >= TEST_LIST_LEN) {
+ printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+ ela->serial);
+ return -EINVAL;
+ }
+ if (elb->serial >= TEST_LIST_LEN) {
+ printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+ elb->serial);
+ return -EINVAL;
+ }
+ if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+ printk(KERN_ERR "list_sort_test: error: phantom element\n");
+ return -EINVAL;
+ }
+ if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+ printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+ ela->poison1, ela->poison2);
+ return -EINVAL;
+ }
+ if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+ printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+ elb->poison1, elb->poison2);
+ return -EINVAL;
+ }
+ return 0;
}
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct debug_el *ela, *elb;
+
+ ela = container_of(a, struct debug_el, list);
+ elb = container_of(b, struct debug_el, list);
+
+ check(ela, elb);
+ return ela->value - elb->value;
+}
static int __init list_sort_test(void)
{
- int i, r = 1, count;
- struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
- struct list_head *cur;
+ int i, count = 1, err = -EINVAL;
+ struct debug_el *el;
+ struct list_head *cur, *tmp;
+ LIST_HEAD(head);
+
+ printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
- printk(KERN_WARNING "testing list_sort()\n");
+ elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+ if (!elts) {
+ printk(KERN_ERR "list_sort_test: error: cannot allocate "
+ "memory\n");
+ goto exit;
+ }
- cur = head;
- for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
- struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
- BUG_ON(!el);
+ for (i = 0; i < TEST_LIST_LEN; i++) {
+ el = kmalloc(sizeof(*el), GFP_KERNEL);
+ if (!el) {
+ printk(KERN_ERR "list_sort_test: error: cannot "
+ "allocate memory\n");
+ goto exit;
+ }
/* force some equivalencies */
- el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+ el->value = random32() % (TEST_LIST_LEN/3);
el->serial = i;
-
- el->l_h.prev = cur;
- cur->next = &el->l_h;
- cur = cur->next;
+ el->poison1 = TEST_POISON1;
+ el->poison2 = TEST_POISON2;
+ elts[i] = el;
+ list_add_tail(&el->list, &head);
}
- head->prev = cur;
- list_sort(NULL, head, cmp);
+ list_sort(NULL, &head, cmp);
+
+ for (cur = head.next; cur->next != &head; cur = cur->next) {
+ struct debug_el *el1;
+ int cmp_result;
- count = 1;
- for (cur = head->next; cur->next != head; cur = cur->next) {
- struct debug_el *el = container_of(cur, struct debug_el, l_h);
- int cmp_result = cmp(NULL, cur, cur->next);
if (cur->next->prev != cur) {
- printk(KERN_EMERG "list_sort() returned "
- "a corrupted list!\n");
- return 1;
- } else if (cmp_result > 0) {
- printk(KERN_EMERG "list_sort() failed to sort!\n");
- return 1;
- } else if (cmp_result == 0 &&
- el->serial >= container_of(cur->next,
- struct debug_el, l_h)->serial) {
- printk(KERN_EMERG "list_sort() failed to preserve order"
- " of equivalent elements!\n");
- return 1;
+ printk(KERN_ERR "list_sort_test: error: list is "
+ "corrupted\n");
+ goto exit;
+ }
+
+ cmp_result = cmp(NULL, cur, cur->next);
+ if (cmp_result > 0) {
+ printk(KERN_ERR "list_sort_test: error: list is not "
+ "sorted\n");
+ goto exit;
+ }
+
+ el = container_of(cur, struct debug_el, list);
+ el1 = container_of(cur->next, struct debug_el, list);
+ if (cmp_result == 0 && el->serial >= el1->serial) {
+ printk(KERN_ERR "list_sort_test: error: order of "
+ "equivalent elements not preserved\n");
+ goto exit;
+ }
+
+ if (check(el, el1)) {
+ printk(KERN_ERR "list_sort_test: error: element check "
+ "failed\n");
+ goto exit;
}
- kfree(cur->prev);
count++;
}
- kfree(cur);
- if (count != LIST_SORT_TEST_LENGTH) {
- printk(KERN_EMERG "list_sort() returned list of"
- "different length!\n");
- return 1;
+
+ if (count != TEST_LIST_LEN) {
+ printk(KERN_ERR "list_sort_test: error: bad list length %d",
+ count);
+ goto exit;
}
- return 0;
+
+ err = 0;
+exit:
+ kfree(elts);
+ list_for_each_safe(cur, tmp, &head) {
+ list_del(cur);
+ kfree(container_of(cur, struct debug_el, list));
+ }
+ return err;
}
module_init(list_sort_test);
-#endif
+#endif /* CONFIG_TEST_LIST_SORT */
diff --git a/lib/parser.c b/lib/parser.c
index fb34977246b..6e89eca5cca 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -128,12 +128,13 @@ static int match_number(substring_t *s, int *result, int base)
char *endp;
char *buf;
int ret;
+ size_t len = s->to - s->from;
- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+ buf = kmalloc(len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, s->from, s->to - s->from);
- buf[s->to - s->from] = '\0';
+ memcpy(buf, s->from, len);
+ buf[len] = '\0';
*result = simple_strtol(buf, &endp, base);
ret = 0;
if (endp == buf)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ec9048e74f4..604678d7d06 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -8,10 +8,53 @@
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/debugobjects.h>
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct percpu_counter *fbc = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ percpu_counter_destroy(fbc);
+ debug_object_free(fbc, &percpu_counter_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+ .name = "percpu_counter",
+ .fixup_free = percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+ debug_object_init(fbc, &percpu_counter_debug_descr);
+ debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+ debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+ debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif /* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
int cpu;
@@ -30,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
s32 *pcount;
- int cpu = get_cpu();
- pcount = per_cpu_ptr(fbc->counters, cpu);
+ preempt_disable();
+ pcount = this_cpu_ptr(fbc->counters);
count = *pcount + amount;
if (count >= batch || count <= -batch) {
spin_lock(&fbc->lock);
@@ -42,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
} else {
*pcount = count;
}
- put_cpu();
+ preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
@@ -75,7 +118,11 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
fbc->counters = alloc_percpu(s32);
if (!fbc->counters)
return -ENOMEM;
+
+ debug_percpu_counter_activate(fbc);
+
#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc->list);
mutex_lock(&percpu_counters_lock);
list_add(&fbc->list, &percpu_counters);
mutex_unlock(&percpu_counters_lock);
@@ -89,6 +136,8 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
if (!fbc->counters)
return;
+ debug_percpu_counter_deactivate(fbc);
+
#ifdef CONFIG_HOTPLUG_CPU
mutex_lock(&percpu_counters_lock);
list_del(&fbc->list);
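For context, the API being instrumented is used roughly like the sketch below (nr_widgets is hypothetical). With CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER enabled, tearing down a counter without the percpu_counter_destroy() call is exactly what the fixup_free hook above catches:

#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;

int widgets_init(void)
{
	return percpu_counter_init(&nr_widgets, 0);
}

void widget_created(void)
{
	percpu_counter_inc(&nr_widgets);	/* cheap per-CPU fast path */
}

s64 widgets_approx(void)
{
	/* reads the shared count only; may lag the per-CPU deltas */
	return percpu_counter_read_positive(&nr_widgets);
}

void widgets_exit(void)
{
	percpu_counter_destroy(&nr_widgets);	/* deactivates the debug object */
}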
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa80b1..6f412ab4c24 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
unsigned int height; /* Height from the bottom */
unsigned int count;
struct rcu_head rcu_head;
- void *slots[RADIX_TREE_MAP_SIZE];
+ void __rcu *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
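The __rcu annotation is the consumer side of the SPARSE_RCU_POINTER option added in the Kconfig hunk above: when building with sparse, plain dereferences of annotated pointers are flagged. A minimal illustration, with a hypothetical gp and struct foo:

#include <linux/rcupdate.h>

struct foo {
	int val;
};

static struct foo __rcu *gp;

int read_val(void)
{
	struct foo *p;
	int v = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* marked access: sparse is satisfied */
	if (p)
		v = p->val;
	rcu_read_unlock();

	return v;	/* dereferencing gp directly here would be flagged */
}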
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082632d..7c06ee51a29 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
*/
static unsigned long io_tlb_overflow = 32*1024;
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+ io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
for (i = 0; i < io_tlb_nslabs; i++)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+ io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
/*
* Get the overflow emergency buffer
*/
- io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+ io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
if (!io_tlb_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n");
if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(bytes);
+ io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
free_bootmem_late(__pa(io_tlb_overflow_buffer),
- io_tlb_overflow);
+ PAGE_ALIGN(io_tlb_overflow));
free_bootmem_late(__pa(io_tlb_orig_addr),
- io_tlb_nslabs * sizeof(phys_addr_t));
+ PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
free_bootmem_late(__pa(io_tlb_list),
- io_tlb_nslabs * sizeof(int));
+ PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
free_bootmem_late(__pa(io_tlb_start),
- io_tlb_nslabs << IO_TLB_SHIFT);
+ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7af9d841c43..c150d3dafff 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -988,8 +988,15 @@ static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
struct printf_spec spec)
{
- if (!ptr)
+ if (!ptr) {
+ /*
+ * Print (null) with the same width as a pointer so it makes
+ * tabular output look nice.
+ */
+ if (spec.field_width == -1)
+ spec.field_width = 2 * sizeof(void *);
return string(buf, end, "(null)", spec);
+ }
switch (*fmt) {
case 'F':
@@ -1031,7 +1038,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
- spec.field_width = 2*sizeof(void *);
+ spec.field_width = 2 * sizeof(void *);
spec.flags |= ZEROPAD;
}
spec.base = 16;
@@ -1497,7 +1504,7 @@ EXPORT_SYMBOL(snprintf);
* @...: Arguments for the format string
*
* The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0.
+ * the trailing '\0'. If @size is == 0 the function returns 0.
*/
int scnprintf(char *buf, size_t size, const char *fmt, ...)
@@ -1509,7 +1516,11 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...)
i = vsnprintf(buf, size, fmt, args);
va_end(args);
- return (i >= size) ? (size - 1) : i;
+ if (likely(i < size))
+ return i;
+ if (size != 0)
+ return size - 1;
+ return 0;
}
EXPORT_SYMBOL(scnprintf);
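The distinction matters in the common append-into-a-buffer pattern: snprintf() returns what would have been written, so its return value can march past the end of the buffer, while scnprintf() returns what was actually written. A sketch:

#include <linux/kernel.h>

static void fill(char *buf, size_t sz)
{
	size_t pos = 0;

	pos += scnprintf(buf + pos, sz - pos, "%s", "hello ");
	pos += scnprintf(buf + pos, sz - pos, "%d%s", 42, " world");
	/* 'pos' can never run past 'sz' because each return value is
	 * capped at the space consumed; and with the fix above, even a
	 * degenerate sz - pos == 0 call returns 0 rather than -1. */
}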