From e7cb55b946a2182c347047dc903c6ed0daef100c Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:08 +0000
Subject: [PATCH 1/7] kmemleak: Do not use off-slab management with SLAB_NOLEAKTRACE

With the slab allocator, if off-slab management is enabled for the
kmem_caches used by kmemleak, it leads to recursive calls into
kmemleak_alloc(). Off-slab management can be triggered by other config
options increasing the slab size, e.g. DEBUG_PAGEALLOC.

Reported-by: Tetsuo Handa
Reviewed-by: Pekka Enberg
Cc: Christoph Lameter
Signed-off-by: Catalin Marinas
---
 mm/slab.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..646db308519 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2261,9 +2261,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * Determine if the slab management is 'on' or 'off' slab.
 	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on.)
+	 * it too early on. Always use on-slab management when
+	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
 	 */
-	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+	    !(flags & SLAB_NOLEAKTRACE))
 		/*
 		 * Size is large, assume best to place the slab management obj
 		 * off-slab (should allow better packing of objs).
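
The recursion fixed above can be illustrated in miniature outside the
kernel: if a tracing allocator obtains its own metadata through the
traced path, every allocation re-enters the tracer. A minimal user-space
sketch (illustrative only; traced_malloc(), trace_alloc() and in_tracer
are hypothetical names, not kernel API):

  #include <stdio.h>
  #include <stdlib.h>

  /* recursion guard, similar in spirit to checking SLAB_NOLEAKTRACE */
  static int in_tracer;

  static void trace_alloc(void *ptr, size_t size);

  static void *traced_malloc(size_t size)
  {
      void *ptr = malloc(size);
      trace_alloc(ptr, size);
      return ptr;
  }

  static void trace_alloc(void *ptr, size_t size)
  {
      if (in_tracer)
          return;             /* without this, the tracer recurses forever */
      in_tracer = 1;
      /* the tracer's own metadata comes from the traced allocator */
      void **meta = traced_malloc(sizeof(void *));
      *meta = ptr;
      printf("traced %zu bytes at %p\n", size, ptr);
      free(meta);
      in_tracer = 0;
  }

  int main(void)
  {
      free(traced_malloc(64));
      return 0;
  }

kmemleak's metadata caches are created with SLAB_NOLEAKTRACE for the same
reason; the hunk above additionally keeps such caches on-slab, so the slab
management structures themselves never come from a traced allocation.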
From c017b4be3e84176cab10eca5e6c4faeb8cfc6f3e Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:09 +0000
Subject: [PATCH 2/7] kmemleak: Simplify the kmemleak_scan_area() function prototype

This function was taking unnecessary arguments that can be determined
by kmemleak. The patch also modifies the calling sites accordingly.

Signed-off-by: Catalin Marinas
Cc: Pekka Enberg
Cc: Christoph Lameter
Cc: Rusty Russell
---
 include/linux/kmemleak.h |  6 ++---
 kernel/module.c          |  7 ++----
 mm/kmemleak.c            | 49 +++++++++++++++++-----------------
 mm/slab.c                |  4 ++--
 4 files changed, 27 insertions(+), 39 deletions(-)

diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 3c7497d46ee..99d9a6766f7 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
 			     size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-			       size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_ignore(const void *ptr)
 {
 }
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
-				      size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 }
 static inline void kmemleak_erase(void **ptr)
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819..1eb95209707 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2043,9 +2043,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 	unsigned int i;
 
 	/* only scan the sections containing data */
-	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-			   (unsigned long)mod->module_core,
-			   sizeof(struct module), GFP_KERNEL);
+	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 	for (i = 1; i < hdr->e_shnum; i++) {
 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -2054,8 +2052,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
 		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
 			continue;
 
-		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-				   (unsigned long)mod->module_core,
+		kmemleak_scan_area((void *)sechdrs[i].sh_addr,
 				   sechdrs[i].sh_size, GFP_KERNEL);
 	}
 }
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8bf765c4f58..96106358e04 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -119,8 +119,8 @@
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
 	struct hlist_node node;
-	unsigned long offset;
-	size_t length;
+	unsigned long start;
+	size_t size;
 };
 
 #define KMEMLEAK_GREY	0
@@ -241,8 +241,6 @@ struct early_log {
 	const void *ptr;		/* allocated/freed memory block */
 	size_t size;			/* memory block size */
 	int min_count;			/* minimum reference count */
-	unsigned long offset;		/* scan area offset */
-	size_t length;			/* scan area length */
 	unsigned long trace[MAX_TRACE];	/* stack trace */
 	unsigned int trace_len;		/* stack trace length */
 };
@@ -720,14 +718,13 @@ static void make_black_object(unsigned long ptr)
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-			  size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
 	unsigned long flags;
 	struct kmemleak_object *object;
 	struct kmemleak_scan_area *area;
 
-	object = find_and_get_object(ptr, 0);
+	object = find_and_get_object(ptr, 1);
 	if (!object) {
 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 			      ptr);
@@ -741,7 +738,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 	}
 
 	spin_lock_irqsave(&object->lock, flags);
-	if (offset + length > object->size) {
+	if (ptr + size > object->pointer + object->size) {
 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 		dump_object_info(object);
 		kmem_cache_free(scan_area_cache, area);
@@ -749,8 +746,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
 	}
 
 	INIT_HLIST_NODE(&area->node);
-	area->offset = offset;
-	area->length = length;
+	area->start = ptr;
+	area->size = size;
 	hlist_add_head(&area->node, &object->area_list);
 
 out_unlock:
@@ -786,7 +783,7 @@ static void object_no_scan(unsigned long ptr)
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
-			     int min_count, unsigned long offset, size_t length)
+			     int min_count)
 {
 	unsigned long flags;
 	struct early_log *log;
@@ -808,8 +805,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
 	log->ptr = ptr;
 	log->size = size;
 	log->min_count = min_count;
-	log->offset = offset;
-	log->length = length;
 	if (op_type == KMEMLEAK_ALLOC)
 		log->trace_len = __save_stack_trace(log->trace);
 	crt_early_log++;
@@ -858,7 +853,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +868,7 @@ void __ref kmemleak_free(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +883,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +898,7 @@ void __ref kmemleak_not_leak(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +914,21 @@ void __ref kmemleak_ignore(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
 * Limit the range to be scanned in an allocated memory block.
 */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-			      size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		add_scan_area((unsigned long)ptr, offset, length, gfp);
+		add_scan_area((unsigned long)ptr, size, gfp);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -948,7 +942,7 @@ void __ref kmemleak_no_scan(const void *ptr)
 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
 	else if (atomic_read(&kmemleak_early_log))
-		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
@@ -1075,9 +1069,9 @@ static void scan_object(struct kmemleak_object *object)
 		}
 	} else
 		hlist_for_each_entry(area, elem, &object->area_list, node)
-			scan_block((void *)(object->pointer + area->offset),
-				   (void *)(object->pointer + area->offset
-					    + area->length), object, 0);
+			scan_block((void *)area->start,
+				   (void *)(area->start + area->size),
+				   object, 0);
 out:
 	spin_unlock_irqrestore(&object->lock, flags);
 }
@@ -1642,8 +1636,7 @@ void __init kmemleak_init(void)
 			kmemleak_ignore(log->ptr);
 			break;
 		case KMEMLEAK_SCAN_AREA:
-			kmemleak_scan_area(log->ptr, log->offset, log->length,
-					   GFP_KERNEL);
+			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
 			break;
 		case KMEMLEAK_NO_SCAN:
 			kmemleak_no_scan(log->ptr);
diff --git a/mm/slab.c b/mm/slab.c
index 646db308519..d2713a944eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2584,8 +2584,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		 * kmemleak does not treat the ->s_mem pointer as a reference
 		 * to the object. Otherwise we will not report the leak.
 		 */
-		kmemleak_scan_area(slabp, offsetof(struct slab, list),
-				   sizeof(struct list_head), local_flags);
+		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+				   local_flags);
 		if (!slabp)
 			return NULL;
 	} else {
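
The simplified prototype lets a caller point directly at the sub-field to
be scanned instead of passing a (base, offset, length) triple; kmemleak
looks up the enclosing object itself (hence find_and_get_object(ptr, 1),
which now accepts pointers aliasing into the middle of an object). A
user-space sketch of the two calling conventions and of the containment
check add_scan_area() now performs — struct names here are illustrative,
not kernel code:

  #include <stddef.h>
  #include <stdio.h>

  struct list_head { struct list_head *next, *prev; };

  struct slab_like {
      struct list_head list;
      unsigned long colouroff;
      void *s_mem;
  };

  /* old style: base pointer plus offset/length inside the block */
  static void scan_area_old(void *base, size_t offset, size_t length)
  {
      printf("scan [%p, %p)\n", (void *)((char *)base + offset),
             (void *)((char *)base + offset + length));
  }

  /* new style: the pointer itself identifies the area; only check
   * that the area fits inside the enclosing object */
  static void scan_area_new(void *ptr, size_t size, void *obj, size_t obj_size)
  {
      if ((char *)ptr + size > (char *)obj + obj_size) {
          fprintf(stderr, "scan area larger than object\n");
          return;
      }
      printf("scan [%p, %p)\n", ptr, (void *)((char *)ptr + size));
  }

  int main(void)
  {
      struct slab_like s;

      scan_area_old(&s, offsetof(struct slab_like, list), sizeof(struct list_head));
      scan_area_new(&s.list, sizeof(struct list_head), &s, sizeof(s));
      return 0;
  }

Both calls describe the same byte range; the second form matches the
mm/slab.c hunk above, which now passes &slabp->list directly.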
From a6f5aa1ea05686ad6e84593a00a04161e6dfb3a3 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:10 +0000
Subject: [PATCH 3/7] kmemleak: Scan the _ftrace_events section in modules

This section contains pointers to allocated objects, and not scanning it
leads to false positives.

Reported-by: Zdenek Kabelac
Acked-by: Rusty Russell
Signed-off-by: Catalin Marinas
---
 kernel/module.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/kernel/module.c b/kernel/module.c
index 1eb95209707..dd29ba43c34 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2380,6 +2380,12 @@ static noinline struct module *load_module(void __user *umod,
 					 "_ftrace_events",
 					 sizeof(*mod->trace_events),
 					 &mod->num_trace_events);
+	/*
+	 * This section contains pointers to allocated objects in the trace
+	 * code and not scanning it leads to false positives.
+	 */
+	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+			   mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 	/* sechdrs[0].sh_size is always zero */
From 0587da40be78d3704a48d3e9a619183891727f5f Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:11 +0000
Subject: [PATCH 4/7] kmemleak: Release the object lock before calling put_object()

The put_object() function may free the object if the use_count dropped
to 0. There shouldn't be any further accesses to such an object unless
it is known that the use_count is non-zero.

Signed-off-by: Catalin Marinas
---
 mm/kmemleak.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 96106358e04..f06c0921e47 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1025,11 +1025,14 @@ static void scan_block(void *_start, void *_end,
 		 * added to the gray_list.
 		 */
 		object->count++;
-		if (color_gray(object))
+		if (color_gray(object)) {
 			list_add_tail(&object->gray_list, &gray_list);
-		else
-			put_object(object);
+			spin_unlock_irqrestore(&object->lock, flags);
+			continue;
+		}
+
 		spin_unlock_irqrestore(&object->lock, flags);
+		put_object(object);
 	}
 }
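
The rule this patch enforces is general: never drop a reference that may
free an object while still holding a lock embedded in that object. A
condensed user-space analogue using POSIX threads and C11 atomics
(hypothetical object layout, not kmemleak code; compile with -pthread):

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdlib.h>

  struct object {
      pthread_mutex_t lock;     /* protects the object's data */
      atomic_int use_count;
  };

  static void put_object(struct object *obj)
  {
      if (atomic_fetch_sub(&obj->use_count, 1) == 1) {
          /* last reference: the object, including its lock, goes away */
          pthread_mutex_destroy(&obj->lock);
          free(obj);
      }
  }

  static void scan_step(struct object *obj)
  {
      pthread_mutex_lock(&obj->lock);
      /* ... inspect the object ... */
      /*
       * Calling put_object(obj) here would risk a use-after-free: if
       * this is the last reference, obj->lock is destroyed and the
       * memory freed while we still hold the lock. Unlock first, as
       * the hunk above does with spin_unlock_irqrestore() before
       * put_object().
       */
      pthread_mutex_unlock(&obj->lock);
      put_object(obj);
  }

  int main(void)
  {
      struct object *obj = malloc(sizeof(*obj));

      pthread_mutex_init(&obj->lock, NULL);
      atomic_init(&obj->use_count, 1);
      scan_step(obj);           /* drops the last reference safely */
      return 0;
  }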
From fefdd336b2a2f7617e0c8a0777c731d9ed6454ae Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:12 +0000
Subject: [PATCH 5/7] kmemleak: Show the age of an unreferenced object

The jiffies value shown for unreferenced objects isn't always meaningful
to people debugging kernel memory leaks. This patch adds the age as well
to the displayed information.

Signed-off-by: Catalin Marinas
---
 mm/kmemleak.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index f06c0921e47..ce79d91eeef 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -346,11 +346,13 @@ static void print_unreferenced(struct seq_file *seq,
 			       struct kmemleak_object *object)
 {
 	int i;
+	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 		   object->pointer, object->size);
-	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
-		   object->comm, object->pid, object->jiffies);
+	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+		   object->comm, object->pid, object->jiffies,
+		   msecs_age / 1000, msecs_age % 1000);
 	hex_dump_object(seq, object);
 	seq_printf(seq, "  backtrace:\n");
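
The age added above is plain integer arithmetic on the millisecond delta
returned by jiffies_to_msecs(). A user-space check of the same format
string, with the delta replaced by a constant:

  #include <stdio.h>

  int main(void)
  {
      int msecs_age = 73042;    /* e.g. a jiffies delta converted to ms */

      /* matches the seq_printf() format added above: seconds.milliseconds */
      printf("(age %d.%03ds)\n", msecs_age / 1000, msecs_age % 1000);
      /* prints: (age 73.042s) */
      return 0;
  }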
From 04609ccc40c4e8f3eabe8894eb0de881c8b984fd Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:12 +0000
Subject: [PATCH 6/7] kmemleak: Reduce the false positives by checking for modified objects

If an object was modified since it was previously suspected as a leak,
do not report it. The modification check is done by calculating the
checksum (CRC32) of such an object. Several false positives are caused
by objects being removed from linked lists (e.g. allocation pools) and
temporarily breaking the reference chain, since kmemleak runs
concurrently with such list mutation primitives.

Signed-off-by: Catalin Marinas
---
 mm/kmemleak.c | 124 ++++++++++++++++++++++++++++----------------------
 1 file changed, 70 insertions(+), 54 deletions(-)

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index ce79d91eeef..002adc3cf3a 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -93,6 +93,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
@@ -108,7 +109,6 @@
 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER	sizeof(void *)
@@ -149,6 +149,8 @@ struct kmemleak_object {
 	int min_count;
 	/* the total number of pointers found pointing to this object */
 	int count;
+	/* checksum for detecting modified objects */
+	u32 checksum;
 	/* memory ranges to be scanned inside an object (empty for all) */
 	struct hlist_head area_list;
 	unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@ struct kmemleak_object {
 #define OBJECT_REPORTED		(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW		(1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE		16
@@ -321,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
 	       object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-	return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
 * Objects are considered unreferenced only if their color is white, they have
 * not be deleted and have a minimum age to avoid false positives caused by
@@ -333,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
 */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 		time_before_eq(object->jiffies + jiffies_min_age,
 			       jiffies_last_scan);
 }
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	pr_notice("  min_count = %d\n", object->min_count);
 	pr_notice("  count = %d\n", object->count);
 	pr_notice("  flags = 0x%lx\n", object->flags);
+	pr_notice("  checksum = %d\n", object->checksum);
 	pr_notice("  backtrace:\n");
 	print_stack_trace(&trace, 4);
 }
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	INIT_HLIST_HEAD(&object->area_list);
 	spin_lock_init(&object->lock);
 	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+	object->flags = OBJECT_ALLOCATED;
 	object->pointer = ptr;
 	object->size = size;
 	object->min_count = min_count;
-	object->count = -1;			/* no color initially */
+	object->count = 0;			/* white color initially */
 	object->jiffies = jiffies;
+	object->checksum = 0;
 
 	/* task information */
 	if (in_irq()) {
@@ -948,6 +945,20 @@ void __ref kmemleak_no_scan(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
+/*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+	u32 old_csum = object->checksum;
+
+	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+		return false;
+
+	object->checksum = crc32(0, (void *)object->pointer, object->size);
+	return object->checksum != old_csum;
+}
+
 /*
 * Memory scanning is a long process and it needs to be interruptable. This
 * function checks whether such interrupt condition occured.
@@ -1081,6 +1092,39 @@ out:
 	spin_unlock_irqrestore(&object->lock, flags);
 }
 
+/*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+	struct kmemleak_object *object, *tmp;
+
+	/*
+	 * The list traversal is safe for both tail additions and removals
+	 * from inside the loop. The kmemleak objects cannot be freed from
+	 * outside the loop because their use_count was incremented.
+	 */
+	object = list_entry(gray_list.next, typeof(*object), gray_list);
+	while (&object->gray_list != &gray_list) {
+		cond_resched();
+
+		/* may add new objects to the list */
+		if (!scan_should_stop())
+			scan_object(object);
+
+		tmp = list_entry(object->gray_list.next, typeof(*object),
+				 gray_list);
+
+		/* remove the object from the list and release it */
+		list_del(&object->gray_list);
+		put_object(object);
+
+		object = tmp;
+	}
+	WARN_ON(!list_empty(&gray_list));
+}
+
 /*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
@@ -1089,10 +1133,9 @@ out:
 static void kmemleak_scan(void)
 {
 	unsigned long flags;
-	struct kmemleak_object *object, *tmp;
+	struct kmemleak_object *object;
 	int i;
 	int new_leaks = 0;
-	int gray_list_pass = 0;
 
 	jiffies_last_scan = jiffies;
 
@@ -1113,7 +1156,6 @@ static void kmemleak_scan(void)
 #endif
 		/* reset the reference count (whiten the object) */
 		object->count = 0;
-		object->flags &= ~OBJECT_NEW;
 		if (color_gray(object) && get_object(object))
 			list_add_tail(&object->gray_list, &gray_list);
 
@@ -1171,62 +1213,36 @@ static void kmemleak_scan(void)
 
 	/*
 	 * Scan the objects already referenced from the sections scanned
-	 * above. More objects will be referenced and, if there are no memory
-	 * leaks, all the objects will be scanned. The list traversal is safe
-	 * for both tail additions and removals from inside the loop. The
-	 * kmemleak objects cannot be freed from outside the loop because their
-	 * use_count was increased.
+	 * above.
 	 */
-repeat:
-	object = list_entry(gray_list.next, typeof(*object), gray_list);
-	while (&object->gray_list != &gray_list) {
-		cond_resched();
-
-		/* may add new objects to the list */
-		if (!scan_should_stop())
-			scan_object(object);
-
-		tmp = list_entry(object->gray_list.next, typeof(*object),
-				 gray_list);
-
-		/* remove the object from the list and release it */
-		list_del(&object->gray_list);
-		put_object(object);
-
-		object = tmp;
-	}
-
-	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-		goto scan_end;
+	scan_gray_list();
 
 	/*
-	 * Check for new objects allocated during this scanning and add them
-	 * to the gray list.
+	 * Check for new or unreferenced objects modified since the previous
+	 * scan and color them gray until the next scan.
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(object, &object_list, object_list) {
 		spin_lock_irqsave(&object->lock, flags);
-		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-		    get_object(object)) {
-			object->flags &= ~OBJECT_NEW;
+		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+		    && update_checksum(object) && get_object(object)) {
+			/* color it gray temporarily */
+			object->count = object->min_count;
 			list_add_tail(&object->gray_list, &gray_list);
 		}
 		spin_unlock_irqrestore(&object->lock, flags);
 	}
 	rcu_read_unlock();
 
-	if (!list_empty(&gray_list))
-		goto repeat;
-
-scan_end:
-	WARN_ON(!list_empty(&gray_list));
+	/*
+	 * Re-scan the gray list for modified unreferenced objects.
+	 */
+	scan_gray_list();
 
 	/*
-	 * If scanning was stopped or new objects were being allocated at a
-	 * higher rate than gray list scanning, do not report any new
-	 * unreferenced objects.
+	 * If scanning was stopped do not report any new unreferenced objects.
 	 */
-	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+	if (scan_should_stop())
 		return;
 
 	/*
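
Nothing in the modification check is kernel-specific: checksum the block
now, compare with the value recorded at the previous scan, and treat a
change as evidence that the object is still in use. A user-space sketch
using zlib's crc32() — link with -lz; the kernel uses its own crc32()
from linux/crc32.h, and kmemleak additionally skips blocks that
kmemcheck reports as uninitialized:

  #include <stdio.h>
  #include <string.h>
  #include <zlib.h>

  struct tracked {
      unsigned long checksum;   /* checksum from the previous scan */
      unsigned char data[32];   /* the memory block being watched */
  };

  /* mirrors update_checksum(): returns nonzero if the block changed */
  static int update_checksum(struct tracked *t)
  {
      unsigned long old_csum = t->checksum;

      t->checksum = crc32(0L, t->data, sizeof(t->data));
      return t->checksum != old_csum;
  }

  int main(void)
  {
      struct tracked t = { 0 };

      update_checksum(&t);                    /* first scan: record baseline */
      printf("unchanged: %d\n", update_checksum(&t));  /* 0 */

      memcpy(t.data, "mutated", 7);           /* e.g. list_del() rewriting links */
      printf("modified: %d\n", update_checksum(&t));   /* 1 */
      return 0;
  }

In kmemleak_scan() above, a white object whose checksum changed is
colored gray temporarily (count = min_count) and re-scanned rather than
reported, until a later scan finds it unchanged.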
From b60e26a2f03d963f8c79ad7920d64abc4d38ecbc Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Fri, 6 Nov 2009 15:33:45 -0800
Subject: [PATCH 7/7] kmemleak: fix kconfig for crc32 build error

kmemleak uses crc32 functions so it needs to select CRC32. Fixes the
following build error:

kmemleak.c:(.text+0x7ce62): undefined reference to `crc32_le'

Signed-off-by: Randy Dunlap
Signed-off-by: Catalin Marinas
---
 lib/Kconfig.debug | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 30df5865ecb..732b9f6ab1e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -352,6 +352,7 @@ config DEBUG_KMEMLEAK
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
 	select KALLSYMS
+	select CRC32
 	help
 	  Say Y here if you want to enable the memory leak detector.
 	  The memory allocation/freeing is traced in a way