Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6
* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Remove alloc_bootmem annotations introduced in the past
  kmemleak: Add callbacks to the bootmem allocator
  kmemleak: Allow partial freeing of memory blocks
  kmemleak: Trace the kmalloc_large* functions in slub
  kmemleak: Scan objects allocated during a scanning episode
  kmemleak: Do not acquire scan_mutex in kmemleak_open()
  kmemleak: Remove the reported leaks number limitation
  kmemleak: Add more cond_resched() calls in the scanning thread
  kmemleak: Renice the scanning thread to +10
commit 7638d5322b
7 changed files with 185 additions and 94 deletions
include/linux/kmemleak.h

@@ -27,6 +27,7 @@ extern void kmemleak_init(void);
 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			    gfp_t gfp);
 extern void kmemleak_free(const void *ptr);
+extern void kmemleak_free_part(const void *ptr, size_t size);
 extern void kmemleak_padding(const void *ptr, unsigned long offset,
			      size_t size);
 extern void kmemleak_not_leak(const void *ptr);
@@ -71,6 +72,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
 static inline void kmemleak_free(const void *ptr)
 {
 }
+static inline void kmemleak_free_part(const void *ptr, size_t size)
+{
+}
 static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
 {
 }
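The two hunks above extend the kmemleak annotation API with kmemleak_free_part(), including the empty stub for builds without kmemleak. As a rough illustration of how a caller pairs the hooks — this is a hypothetical example, not code from this commit:

#include <linux/kmemleak.h>
#include <linux/mm.h>

/*
 * Hypothetical caller, illustration only: a raw page-level allocation is
 * registered with kmemleak by hand; later, kmemleak is told that the
 * first `head` bytes were handed back, so only that sub-range is dropped
 * from leak tracking while the remainder stays tracked.
 */
static void *tracked_block_example(size_t size, size_t head)
{
	void *block = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

	if (!block)
		return NULL;
	/* min_count = 1: report the block if no references to it are found */
	kmemleak_alloc(block, size, 1, GFP_KERNEL);
	/* drop only [block, block + head) from tracking */
	kmemleak_free_part(block, head);
	return block + head;
}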
include/linux/slub_def.h

@@ -11,6 +11,7 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 #include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>

 enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -233,6 +234,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

+	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
kernel/pid.c

@@ -36,7 +36,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
-#include <linux/kmemleak.h>

 #define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -513,12 +512,6 @@ void __init pidhash_init(void)
	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
-	/*
-	 * pid_hash contains references to allocated struct pid objects and it
-	 * must be scanned by kmemleak to avoid false positives.
-	 */
-	kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
-		       GFP_KERNEL);
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
 }
mm/bootmem.c

@@ -12,6 +12,7 @@
 #include <linux/pfn.h>
 #include <linux/bootmem.h>
 #include <linux/module.h>
+#include <linux/kmemleak.h>

 #include <asm/bug.h>
 #include <asm/io.h>
@@ -335,6 +336,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
	unsigned long start, end;

+	kmemleak_free_part(__va(physaddr), size);
+
	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

@@ -354,6 +357,8 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
 {
	unsigned long start, end;

+	kmemleak_free_part(__va(addr), size);
+
	start = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

@@ -516,6 +521,7 @@ find_block:
	region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
			start_off);
	memset(region, 0, size);
+	kmemleak_alloc(region, size, 1, 0);
	return region;
 }

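A note on the free_bootmem*() hunks above: kmemleak_free_part() is given the exact byte range, while the bootmem bitmap only releases pages wholly contained in it. A standalone sketch of that rounding — the helper name is hypothetical; the real functions pass the page range on to mark_bootmem():

#include <linux/pfn.h>
#include <linux/kmemleak.h>

/* Hypothetical helper illustrating the rounding used above. */
static void free_range_sketch(unsigned long physaddr, unsigned long size)
{
	unsigned long start = PFN_UP(physaddr);		/* first whole page */
	unsigned long end = PFN_DOWN(physaddr + size);	/* one past the last */

	kmemleak_free_part(__va(physaddr), size);	/* exact byte range */
	/* only pages [start, end) are actually released by the bitmap */
}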
236 mm/kmemleak.c
@@ -103,10 +103,10 @@
  * Kmemleak configuration and common defines.
  */
 #define MAX_TRACE		16	/* stack trace length */
-#define REPORTS_NR		50	/* maximum number of reported leaks */
 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
+#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */

 #define BYTES_PER_POINTER	sizeof(void *)

@@ -158,6 +158,8 @@ struct kmemleak_object {
 #define OBJECT_REPORTED		(1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN		(1 << 2)
+/* flag set on newly allocated objects */
+#define OBJECT_NEW		(1 << 3)

 /* the list of all allocated objects */
 static LIST_HEAD(object_list);
@@ -196,9 +198,6 @@ static int kmemleak_stack_scan = 1;
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);

-/* number of leaks reported (for limitation purposes) */
-static int reported_leaks;
-
 /*
  * Early object allocation/freeing logging. Kmemleak is initialized after the
  * kernel allocator. However, both the kernel allocator and kmemleak may
@@ -211,6 +210,7 @@ static int reported_leaks;
 enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
+	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
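KMEMLEAK_FREE_PART joins the operations that can be buffered before kmemleak is initialised and replayed later from kmemleak_init() (see the hunk near the end of this file). A condensed sketch of that early-log pattern, keeping only the fields this commit touches — the real struct early_log carries a few more, and the buffer size here is illustrative:

/* Condensed illustration of this file's early-log mechanism. */
struct early_log_sketch {
	int op_type;		/* KMEMLEAK_ALLOC, KMEMLEAK_FREE_PART, ... */
	const void *ptr;	/* memory block affected */
	size_t size;		/* block size, or size of the freed part */
};

static struct early_log_sketch early_log_buf[200];
static int crt_early_log_sketch;

static void log_early_sketch(int op_type, const void *ptr, size_t size)
{
	struct early_log_sketch *log = &early_log_buf[crt_early_log_sketch++];

	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
}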
@@ -274,6 +274,11 @@ static int color_gray(const struct kmemleak_object *object)
	return object->min_count != -1 && object->count >= object->min_count;
 }

+static int color_black(const struct kmemleak_object *object)
+{
+	return object->min_count == -1;
+}
+
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -451,7 +456,7 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
-	object->flags = OBJECT_ALLOCATED;
+	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
@@ -519,27 +524,17 @@ out:
  * Remove the metadata (struct kmemleak_object) for a memory block from the
  * object_list and object_tree_root and decrement its use_count.
  */
-static void delete_object(unsigned long ptr)
+static void __delete_object(struct kmemleak_object *object)
 {
	unsigned long flags;
-	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
-	object = lookup_object(ptr, 0);
-	if (!object) {
-#ifdef DEBUG
-		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
-			      ptr);
-#endif
-		write_unlock_irqrestore(&kmemleak_lock, flags);
-		return;
-	}
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
-	WARN_ON(atomic_read(&object->use_count) < 1);
+	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
@@ -551,6 +546,64 @@ static void delete_object(unsigned long ptr)
	put_object(object);
 }

+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it.
+ */
+static void delete_object_full(unsigned long ptr)
+{
+	struct kmemleak_object *object;
+
+	object = find_and_get_object(ptr, 0);
+	if (!object) {
+#ifdef DEBUG
+		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
+			      ptr);
+#endif
+		return;
+	}
+	__delete_object(object);
+	put_object(object);
+}
+
+/*
+ * Look up the metadata (struct kmemleak_object) corresponding to ptr and
+ * delete it. If the memory block is partially freed, the function may create
+ * additional metadata for the remaining parts of the block.
+ */
+static void delete_object_part(unsigned long ptr, size_t size)
+{
+	struct kmemleak_object *object;
+	unsigned long start, end;
+
+	object = find_and_get_object(ptr, 1);
+	if (!object) {
+#ifdef DEBUG
+		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
+			      "(size %zu)\n", ptr, size);
+#endif
+		return;
+	}
+	__delete_object(object);
+
+	/*
+	 * Create one or two objects that may result from the memory block
+	 * split. Note that partial freeing is only done by free_bootmem() and
+	 * this happens before kmemleak_init() is called. The path below is
+	 * only executed during early log recording in kmemleak_init(), so
+	 * GFP_KERNEL is enough.
+	 */
+	start = object->pointer;
+	end = object->pointer + object->size;
+	if (ptr > start)
+		create_object(start, ptr - start, object->min_count,
+			      GFP_KERNEL);
+	if (ptr + size < end)
+		create_object(ptr + size, end - ptr - size, object->min_count,
+			      GFP_KERNEL);
+
+	put_object(object);
+}
 /*
  * Make a object permanently as gray-colored so that it can no longer be
  * reported as a leak. This is used in general to mark a false positive.
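The splitting logic in delete_object_part() above boils down to interval arithmetic: freeing [ptr, ptr + size) out of a tracked block [start, end) leaves at most a head and a tail to re-create. A minimal standalone rendering, with hypothetical types:

struct leftover {
	unsigned long start;
	unsigned long len;
};

/* Returns how many surviving sub-blocks must be re-created (0, 1 or 2). */
static int split_block_sketch(unsigned long start, unsigned long end,
			      unsigned long ptr, unsigned long size,
			      struct leftover out[2])
{
	int n = 0;

	if (ptr > start) {		/* head survives */
		out[n].start = start;
		out[n].len = ptr - start;
		n++;
	}
	if (ptr + size < end) {		/* tail survives */
		out[n].start = ptr + size;
		out[n].len = end - ptr - size;
		n++;
	}
	return n;
}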
@@ -715,12 +768,27 @@ void kmemleak_free(const void *ptr)
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-		delete_object((unsigned long)ptr);
+		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);

+/*
+ * Partial memory freeing function callback. This function is usually called
+ * from bootmem allocator when (part of) a memory block is freed.
+ */
+void kmemleak_free_part(const void *ptr, size_t size)
+{
+	pr_debug("%s(0x%p)\n", __func__, ptr);
+
+	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+		delete_object_part((unsigned long)ptr, size);
+	else if (atomic_read(&kmemleak_early_log))
+		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_part);
+
 /*
  * Mark an already allocated memory block as a false positive. This will cause
  * the block to no longer be reported as leak and always be scanned.
@@ -807,7 +875,7 @@ static int scan_should_stop(void)
  * found to the gray list.
  */
 static void scan_block(void *_start, void *_end,
-		       struct kmemleak_object *scanned)
+		       struct kmemleak_object *scanned, int allow_resched)
 {
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
@@ -818,6 +886,8 @@ static void scan_block(void *_start, void *_end,
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

+		if (allow_resched)
+			cond_resched();
		if (scan_should_stop())
			break;

@@ -881,12 +951,12 @@ static void scan_object(struct kmemleak_object *object)
		goto out;
	if (hlist_empty(&object->area_list))
		scan_block((void *)object->pointer,
-			   (void *)(object->pointer + object->size), object);
+			   (void *)(object->pointer + object->size), object, 0);
	else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
-					    + area->length), object);
+					    + area->length), object, 0);
 out:
	spin_unlock_irqrestore(&object->lock, flags);
 }
@@ -903,6 +973,7 @@ static void kmemleak_scan(void)
	struct task_struct *task;
	int i;
	int new_leaks = 0;
+	int gray_list_pass = 0;

	jiffies_last_scan = jiffies;

@@ -923,6 +994,7 @@ static void kmemleak_scan(void)
 #endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
+		object->flags &= ~OBJECT_NEW;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

@@ -931,14 +1003,14 @@ static void kmemleak_scan(void)
	rcu_read_unlock();

	/* data/bss scanning */
-	scan_block(_sdata, _edata, NULL);
-	scan_block(__bss_start, __bss_stop, NULL);
+	scan_block(_sdata, _edata, NULL, 1);
+	scan_block(__bss_start, __bss_stop, NULL, 1);

 #ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
-			   __per_cpu_end + per_cpu_offset(i), NULL);
+			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
 #endif

	/*
@@ -960,7 +1032,7 @@ static void kmemleak_scan(void)
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
-			scan_block(page, page + 1, NULL);
+			scan_block(page, page + 1, NULL, 1);
		}
	}

@@ -972,7 +1044,8 @@ static void kmemleak_scan(void)
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
-				   task_stack_page(task) + THREAD_SIZE, NULL);
+				   task_stack_page(task) + THREAD_SIZE,
+				   NULL, 0);
		read_unlock(&tasklist_lock);
	}

@@ -984,6 +1057,7 @@ static void kmemleak_scan(void)
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
+repeat:
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();
@@ -1001,12 +1075,38 @@ static void kmemleak_scan(void)

		object = tmp;
	}

+	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
+		goto scan_end;
+
+	/*
+	 * Check for new objects allocated during this scanning and add them
+	 * to the gray list.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(object, &object_list, object_list) {
+		spin_lock_irqsave(&object->lock, flags);
+		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
+		    get_object(object)) {
+			object->flags &= ~OBJECT_NEW;
+			list_add_tail(&object->gray_list, &gray_list);
+		}
+		spin_unlock_irqrestore(&object->lock, flags);
+	}
+	rcu_read_unlock();
+
+	if (!list_empty(&gray_list))
+		goto repeat;
+
+scan_end:
	WARN_ON(!list_empty(&gray_list));

	/*
-	 * If scanning was stopped do not report any new unreferenced objects.
+	 * If scanning was stopped or new objects were being allocated at a
+	 * higher rate than gray list scanning, do not report any new
+	 * unreferenced objects.
	 */
-	if (scan_should_stop())
+	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
		return;

	/*
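The repeat/scan_end flow added above is a bounded fixpoint iteration: objects allocated while a scan is in progress carry OBJECT_NEW, are swept into the gray list, and are rescanned, but only up to GRAY_LIST_PASSES times so that an allocation-heavy workload cannot keep the scanner busy forever. Schematically, with hypothetical stand-in helpers for the kmemleak internals:

/* Stand-ins for the kmemleak internals; hypothetical sketch only. */
static void scan_gray_list(void);
static bool collect_new_objects(void);	/* true if new gray objects queued */

static void scan_to_fixpoint_sketch(void)
{
	int pass = 0;

	do {
		scan_gray_list();		/* may itself trigger allocations */
		if (++pass >= GRAY_LIST_PASSES)
			return;			/* allocations outpace the scanner */
	} while (collect_new_objects());	/* sweeps OBJECT_NEW objects */
}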
@@ -1039,6 +1139,7 @@ static int kmemleak_scan_thread(void *arg)
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
+	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
@@ -1101,11 +1202,11 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
 {
	struct kmemleak_object *object;
	loff_t n = *pos;
+	int err;

-	if (!n)
-		reported_leaks = 0;
-	if (reported_leaks >= REPORTS_NR)
-		return NULL;
+	err = mutex_lock_interruptible(&scan_mutex);
+	if (err < 0)
+		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
@@ -1131,8 +1232,6 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	struct list_head *n = &prev_obj->object_list;

	++(*pos);
-	if (reported_leaks >= REPORTS_NR)
-		goto out;

	rcu_read_lock();
	list_for_each_continue_rcu(n, &object_list) {
@@ -1141,7 +1240,7 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
			break;
	}
	rcu_read_unlock();
-out:
+
	put_object(prev_obj);
	return next_obj;
 }
@@ -1151,8 +1250,15 @@ out:
  */
 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
 {
-	if (v)
-		put_object(v);
+	if (!IS_ERR(v)) {
+		/*
+		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
+		 * waiting was interrupted, so only release it if !IS_ERR.
+		 */
+		mutex_unlock(&scan_mutex);
+		if (v)
+			put_object(v);
+	}
 }

 /*
@@ -1164,10 +1270,8 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
-	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
+	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
-		reported_leaks++;
-	}
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
 }
@@ -1181,36 +1285,15 @@ static const struct seq_operations kmemleak_seq_ops = {

 static int kmemleak_open(struct inode *inode, struct file *file)
 {
-	int ret = 0;
-
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

-	ret = mutex_lock_interruptible(&scan_mutex);
-	if (ret < 0)
-		goto out;
-	if (file->f_mode & FMODE_READ) {
-		ret = seq_open(file, &kmemleak_seq_ops);
-		if (ret < 0)
-			goto scan_unlock;
-	}
-	return ret;
-
-scan_unlock:
-	mutex_unlock(&scan_mutex);
-out:
-	return ret;
+	return seq_open(file, &kmemleak_seq_ops);
 }

 static int kmemleak_release(struct inode *inode, struct file *file)
 {
-	int ret = 0;
-
-	if (file->f_mode & FMODE_READ)
-		seq_release(inode, file);
-	mutex_unlock(&scan_mutex);
-
-	return ret;
+	return seq_release(inode, file);
 }

 /*
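With the rewrite above, scan_mutex is no longer pinned for the whole lifetime of the open file descriptor; it is taken in kmemleak_seq_start() and released in kmemleak_seq_stop() (shown earlier), a pair that seq_read() always invokes together. The shape of that pattern, sketched with generic names rather than the kmemleak ones:

static DEFINE_MUTEX(sketch_mutex);
static void *first_item(loff_t pos);	/* hypothetical lookup; NULL at EOF */

static void *sketch_seq_start(struct seq_file *seq, loff_t *pos)
{
	int err = mutex_lock_interruptible(&sketch_mutex);

	if (err < 0)
		return ERR_PTR(err);	/* ->stop skips the unlock via IS_ERR */
	return first_item(*pos);
}

static void sketch_seq_stop(struct seq_file *seq, void *v)
{
	/* called even when ->start returned NULL, so unlock on !IS_ERR */
	if (!IS_ERR(v))
		mutex_unlock(&sketch_mutex);
}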
@@ -1230,15 +1313,17 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
 {
	char buf[64];
	int buf_size;
-
-	if (!atomic_read(&kmemleak_enabled))
-		return -EBUSY;
+	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

+	ret = mutex_lock_interruptible(&scan_mutex);
+	if (ret < 0)
+		return ret;
+
	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
@@ -1251,11 +1336,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;
-		int err;

-		err = strict_strtoul(buf + 5, 0, &secs);
-		if (err < 0)
-			return err;
+		ret = strict_strtoul(buf + 5, 0, &secs);
+		if (ret < 0)
+			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
@@ -1264,7 +1348,12 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else
-		return -EINVAL;
+		ret = -EINVAL;
+
+out:
+	mutex_unlock(&scan_mutex);
+	if (ret < 0)
+		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
@@ -1293,7 +1382,7 @@ static int kmemleak_cleanup_thread(void *arg)

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
-		delete_object(object->pointer);
+		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

@@ -1388,6 +1477,9 @@ void __init kmemleak_init(void)
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
+		case KMEMLEAK_FREE_PART:
+			kmemleak_free_part(log->ptr, log->size);
+			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
mm/page_alloc.c

@@ -4745,8 +4745,10 @@ void *__init alloc_large_system_hash(const char *tablename,
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
-			if (get_order(size) < MAX_ORDER)
+			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
+				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

@@ -4764,16 +4766,6 @@ void *__init alloc_large_system_hash(const char *tablename,
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

-	/*
-	 * If hashdist is set, the table allocation is done with __vmalloc()
-	 * which invokes the kmemleak_alloc() callback. This function may also
-	 * be called before the slab and kmemleak are initialised when
-	 * kmemleak simply buffers the request to be executed later
-	 * (GFP_ATOMIC flag ignored in this case).
-	 */
-	if (!hashdist)
-		kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
	return table;
 }

10 mm/slub.c
@@ -21,7 +21,6 @@
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
-#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -2835,13 +2834,15 @@ EXPORT_SYMBOL(__kmalloc);
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
	struct page *page;
+	void *ptr = NULL;

	flags |= __GFP_COMP | __GFP_NOTRACK;
	page = alloc_pages_node(node, flags, get_order(size));
	if (page)
-		return page_address(page);
-	else
-		return NULL;
+		ptr = page_address(page);
+
+	kmemleak_alloc(ptr, size, 1, flags);
+	return ptr;
 }

 #ifdef CONFIG_NUMA
@@ -2926,6 +2927,7 @@ void kfree(const void *x)
	page = virt_to_head_page(x);
	if (unlikely(!PageSlab(page))) {
		BUG_ON(!PageCompound(page));
+		kmemleak_free(x);
		put_page(page);
		return;
	}