Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6

* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: Inform kmemleak about pid_hash
  kmemleak: Do not warn if an unknown object is freed
  kmemleak: Do not report new leaked objects if the scanning was stopped
  kmemleak: Slightly change the policy on newly allocated objects
  kmemleak: Do not trigger a scan when reading the debug/kmemleak file
  kmemleak: Simplify the reports logged by the scanning thread
  kmemleak: Enable task stacks scanning by default
  kmemleak: Allow the early log buffer to be configurable.
commit e83c2b0ff3
Author: Linus Torvalds
Date:   2009-06-30 19:04:53 -07:00
4 changed files with 106 additions and 104 deletions

Documentation/kmemleak.txt

@@ -16,13 +16,17 @@ Usage
-----
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
thread scans the memory every 10 minutes (by default) and prints any new
unreferenced objects found. To trigger an intermediate scan and display
all the possible memory leaks:
thread scans the memory every 10 minutes (by default) and prints the
number of new unreferenced objects found. To display the details of all
the possible memory leaks:
# mount -t debugfs nodev /sys/kernel/debug/
# cat /sys/kernel/debug/kmemleak
To trigger an intermediate memory scan:
# echo scan > /sys/kernel/debug/kmemleak
Note that the orphan objects are listed in the order they were allocated
and one object at the beginning of the list may cause other subsequent
objects to be reported as orphan.
@@ -31,16 +35,21 @@ Memory scanning parameters can be modified at run-time by writing to the
/sys/kernel/debug/kmemleak file. The following parameters are supported:
off - disable kmemleak (irreversible)
stack=on - enable the task stacks scanning
stack=on - enable the task stacks scanning (default)
stack=off - disable the tasks stacks scanning
scan=on - start the automatic memory scanning thread
scan=on - start the automatic memory scanning thread (default)
scan=off - stop the automatic memory scanning thread
scan=<secs> - set the automatic memory scanning period in seconds (0
to disable it)
scan=<secs> - set the automatic memory scanning period in seconds
(default 600, 0 to stop the automatic scanning)
scan - trigger a memory scan
Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
the kernel command line.
Memory may be allocated or freed before kmemleak is initialised and
these actions are stored in an early log buffer. The size of this buffer
is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
Basic Algorithm
---------------

kernel/pid.c

@@ -36,6 +36,7 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/kmemleak.h>
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -512,6 +513,12 @@ void __init pidhash_init(void)
pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
if (!pid_hash)
panic("Could not alloc pidhash!\n");
/*
* pid_hash contains references to allocated struct pid objects and it
* must be scanned by kmemleak to avoid false positives.
*/
kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
GFP_KERNEL);
for (i = 0; i < pidhash_size; i++)
INIT_HLIST_HEAD(&pid_hash[i]);
}
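
For context, this hunk uses the general kmemleak hook pattern for memory that is not obtained from the slab allocator: kmemleak_alloc() registers the block so its contents are scanned for pointers, and kmemleak_free() unregisters it before the block goes away. A minimal sketch of the same pairing for a page-allocator buffer follows; the example_* names are hypothetical and not part of this patch.

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

static unsigned long example_buf;	/* hypothetical per-driver buffer */

static int example_buf_init(void)
{
	/* page allocator memory is not tracked by kmemleak automatically */
	example_buf = __get_free_pages(GFP_KERNEL, 2);	/* 4 pages */
	if (!example_buf)
		return -ENOMEM;

	/*
	 * min_count == 0: never report this block itself as a leak, only
	 * scan it so objects referenced solely from here are not flagged
	 * as false positives (same idea as pid_hash above).
	 */
	kmemleak_alloc((void *)example_buf, 4 * PAGE_SIZE, 0, GFP_KERNEL);
	return 0;
}

static void example_buf_exit(void)
{
	/* unregister first so kmemleak never scans freed memory */
	kmemleak_free((void *)example_buf);
	free_pages(example_buf, 2);
}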

lib/Kconfig.debug

@@ -359,6 +359,18 @@ config DEBUG_KMEMLEAK
In order to access the kmemleak file, debugfs needs to be
mounted (usually at /sys/kernel/debug).
config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
int "Maximum kmemleak early log entries"
depends on DEBUG_KMEMLEAK
range 200 2000
default 400
help
Kmemleak must track all the memory allocations to avoid
reporting false positives. Since memory may be allocated or
freed before kmemleak is initialised, an early log buffer is
used to store these actions. If kmemleak reports "early log
buffer exceeded", please increase this value.
config DEBUG_KMEMLEAK_TEST
tristate "Simple test for the kernel memory leak detector"
depends on DEBUG_KMEMLEAK

mm/kmemleak.c

@@ -48,10 +48,10 @@
* scanned. This list is only modified during a scanning episode when the
* scan_mutex is held. At the end of a scan, the gray_list is always empty.
* Note that the kmemleak_object.use_count is incremented when an object is
* added to the gray_list and therefore cannot be freed
* - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs
* file together with modifications to the memory scanning parameters
* including the scan_thread pointer
* added to the gray_list and therefore cannot be freed. This mutex also
* prevents multiple users of the "kmemleak" debugfs file together with
* modifications to the memory scanning parameters including the scan_thread
* pointer
*
* The kmemleak_object structures have a use_count incremented or decremented
* using the get_object()/put_object() functions. When the use_count becomes
@@ -190,15 +190,15 @@ static unsigned long max_addr;
static unsigned long next_scan_yield;
static struct task_struct *scan_thread;
static unsigned long jiffies_scan_yield;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan;
/* mutex protecting the memory scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */
static DEFINE_MUTEX(kmemleak_mutex);
/* number of leaks reported (for limitation purposes) */
static int reported_leaks;
@@ -235,7 +235,7 @@ struct early_log {
};
/* early logging buffer and current position */
static struct early_log early_log[200];
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;
static void kmemleak_disable(void);
@@ -278,15 +278,6 @@ static int color_gray(const struct kmemleak_object *object)
return object->min_count != -1 && object->count >= object->min_count;
}
/*
* Objects are considered referenced if their color is gray and they have not
* been deleted.
*/
static int referenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_gray(object);
}
/*
* Objects are considered unreferenced only if their color is white, they have
* not be deleted and have a minimum age to avoid false positives caused by
@@ -295,42 +286,28 @@ static int referenced_object(struct kmemleak_object *object)
static int unreferenced_object(struct kmemleak_object *object)
{
return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
time_is_before_eq_jiffies(object->jiffies + jiffies_min_age);
time_before_eq(object->jiffies + jiffies_min_age,
jiffies_last_scan);
}
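
The effect of the comparison above: an object only becomes eligible for reporting once it was at least jiffies_min_age old when the last scan started, so objects allocated during or after a scan are deferred to a later scan. A wrap-safe sketch of the same predicate, with a hypothetical helper name:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical helper mirroring the check in unreferenced_object():
 * true once the object allocated at alloc_jiffies was at least
 * min_age ticks old when the last scan started. */
static inline bool old_enough(unsigned long alloc_jiffies,
			      unsigned long min_age,
			      unsigned long last_scan_jiffies)
{
	/* time_before_eq() handles jiffies counter wrap-around */
	return time_before_eq(alloc_jiffies + min_age, last_scan_jiffies);
}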
/*
* Printing of the (un)referenced objects information, either to the seq file
* or to the kernel log. The print_referenced/print_unreferenced functions
* must be called with the object->lock held.
* Printing of the unreferenced objects information to the seq file. The
* print_unreferenced function must be called with the object->lock held.
*/
#define print_helper(seq, x...) do { \
struct seq_file *s = (seq); \
if (s) \
seq_printf(s, x); \
else \
pr_info(x); \
} while (0)
static void print_referenced(struct kmemleak_object *object)
{
pr_info("referenced object 0x%08lx (size %zu)\n",
object->pointer, object->size);
}
static void print_unreferenced(struct seq_file *seq,
struct kmemleak_object *object)
{
int i;
print_helper(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
print_helper(seq, " backtrace:\n");
seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
object->pointer, object->size);
seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
object->comm, object->pid, object->jiffies);
seq_printf(seq, " backtrace:\n");
for (i = 0; i < object->trace_len; i++) {
void *ptr = (void *)object->trace[i];
print_helper(seq, " [<%p>] %pS\n", ptr, ptr);
seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
}
}
@@ -554,8 +531,10 @@ static void delete_object(unsigned long ptr)
write_lock_irqsave(&kmemleak_lock, flags);
object = lookup_object(ptr, 0);
if (!object) {
#ifdef DEBUG
kmemleak_warn("Freeing unknown object at 0x%08lx\n",
ptr);
#endif
write_unlock_irqrestore(&kmemleak_lock, flags);
return;
}
@@ -571,8 +550,6 @@ static void delete_object(unsigned long ptr)
* cannot be freed when it is being scanned.
*/
spin_lock_irqsave(&object->lock, flags);
if (object->flags & OBJECT_REPORTED)
print_referenced(object);
object->flags &= ~OBJECT_ALLOCATED;
spin_unlock_irqrestore(&object->lock, flags);
put_object(object);
@@ -696,7 +673,8 @@ static void log_early(int op_type, const void *ptr, size_t size,
struct early_log *log;
if (crt_early_log >= ARRAY_SIZE(early_log)) {
kmemleak_stop("Early log buffer exceeded\n");
pr_warning("Early log buffer exceeded\n");
kmemleak_disable();
return;
}
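
For readers unfamiliar with the mechanism touched above: allocations and frees that happen before kmemleak is initialised cannot be handled directly, so they are appended to a fixed-size early log and replayed once initialisation completes; if the buffer overflows, kmemleak gives up rather than lose track of objects. A much simplified sketch of that pattern follows; the example_* names are hypothetical and not kmemleak's actual structures.

#include <linux/kernel.h>
#include <linux/types.h>

enum example_op { EXAMPLE_ALLOC, EXAMPLE_FREE };

struct example_early_entry {
	enum example_op op;	/* which callback was invoked */
	const void *ptr;	/* start of the memory block */
	size_t size;		/* size of the block (alloc only) */
};

/* sized at build time by the new Kconfig option */
static struct example_early_entry
	example_early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int example_early_pos;

/* record an operation that happened before the tracker was initialised */
static void example_log_early(enum example_op op, const void *ptr, size_t size)
{
	struct example_early_entry *e;

	if (example_early_pos >= ARRAY_SIZE(example_early_log)) {
		/* buffer exceeded: give up rather than miss objects */
		return;
	}
	e = &example_early_log[example_early_pos++];
	e->op = op;
	e->ptr = ptr;
	e->size = size;
}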
@@ -952,6 +930,9 @@ static void kmemleak_scan(void)
struct kmemleak_object *object, *tmp;
struct task_struct *task;
int i;
int new_leaks = 0;
jiffies_last_scan = jiffies;
/* prepare the kmemleak_object's */
rcu_read_lock();
@@ -1049,6 +1030,32 @@ static void kmemleak_scan(void)
object = tmp;
}
WARN_ON(!list_empty(&gray_list));
/*
* If scanning was stopped do not report any new unreferenced objects.
*/
if (scan_should_stop())
return;
/*
* Scanning result reporting.
*/
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
spin_lock_irqsave(&object->lock, flags);
if (unreferenced_object(object) &&
!(object->flags & OBJECT_REPORTED)) {
object->flags |= OBJECT_REPORTED;
new_leaks++;
}
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
if (new_leaks)
pr_info("%d new suspected memory leaks (see "
"/sys/kernel/debug/kmemleak)\n", new_leaks);
}
/*
@@ -1070,36 +1077,12 @@ static int kmemleak_scan_thread(void *arg)
}
while (!kthread_should_stop()) {
struct kmemleak_object *object;
signed long timeout = jiffies_scan_wait;
mutex_lock(&scan_mutex);
kmemleak_scan();
reported_leaks = 0;
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list) {
unsigned long flags;
if (reported_leaks >= REPORTS_NR)
break;
spin_lock_irqsave(&object->lock, flags);
if (!(object->flags & OBJECT_REPORTED) &&
unreferenced_object(object)) {
print_unreferenced(NULL, object);
object->flags |= OBJECT_REPORTED;
reported_leaks++;
} else if ((object->flags & OBJECT_REPORTED) &&
referenced_object(object)) {
print_referenced(object);
object->flags &= ~OBJECT_REPORTED;
}
spin_unlock_irqrestore(&object->lock, flags);
}
rcu_read_unlock();
mutex_unlock(&scan_mutex);
/* wait before the next scan */
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@@ -1112,7 +1095,7 @@ static int kmemleak_scan_thread(void *arg)
/*
* Start the automatic memory scanning thread. This function must be called
* with the kmemleak_mutex held.
* with the scan_mutex held.
*/
void start_scan_thread(void)
{
@@ -1127,7 +1110,7 @@ void start_scan_thread(void)
/*
* Stop the automatic memory scanning thread. This function must be called
* with the kmemleak_mutex held.
* with the scan_mutex held.
*/
void stop_scan_thread(void)
{
@@ -1147,10 +1130,8 @@ static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
struct kmemleak_object *object;
loff_t n = *pos;
if (!n) {
kmemleak_scan();
if (!n)
reported_leaks = 0;
}
if (reported_leaks >= REPORTS_NR)
return NULL;
@@ -1211,11 +1192,10 @@ static int kmemleak_seq_show(struct seq_file *seq, void *v)
unsigned long flags;
spin_lock_irqsave(&object->lock, flags);
if (!unreferenced_object(object))
goto out;
print_unreferenced(seq, object);
reported_leaks++;
out:
if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) {
print_unreferenced(seq, object);
reported_leaks++;
}
spin_unlock_irqrestore(&object->lock, flags);
return 0;
}
@@ -1234,13 +1214,10 @@ static int kmemleak_open(struct inode *inode, struct file *file)
if (!atomic_read(&kmemleak_enabled))
return -EBUSY;
ret = mutex_lock_interruptible(&kmemleak_mutex);
ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto out;
if (file->f_mode & FMODE_READ) {
ret = mutex_lock_interruptible(&scan_mutex);
if (ret < 0)
goto kmemleak_unlock;
ret = seq_open(file, &kmemleak_seq_ops);
if (ret < 0)
goto scan_unlock;
@@ -1249,8 +1226,6 @@ static int kmemleak_open(struct inode *inode, struct file *file)
scan_unlock:
mutex_unlock(&scan_mutex);
kmemleak_unlock:
mutex_unlock(&kmemleak_mutex);
out:
return ret;
}
@@ -1259,11 +1234,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
{
int ret = 0;
if (file->f_mode & FMODE_READ) {
if (file->f_mode & FMODE_READ)
seq_release(inode, file);
mutex_unlock(&scan_mutex);
}
mutex_unlock(&kmemleak_mutex);
mutex_unlock(&scan_mutex);
return ret;
}
@@ -1278,6 +1251,7 @@ static int kmemleak_release(struct inode *inode, struct file *file)
* scan=off - stop the automatic memory scanning thread
* scan=... - set the automatic memory scanning period in seconds (0 to
* disable it)
* scan - trigger a memory scan
*/
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
size_t size, loff_t *ppos)
@@ -1315,7 +1289,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
start_scan_thread();
}
} else
} else if (strncmp(buf, "scan", 4) == 0)
kmemleak_scan();
else
return -EINVAL;
/* ignore the rest of the buffer, only one command at a time */
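
The surrounding function follows the usual debugfs command-file pattern: copy a short, NUL-terminated command from userspace, dispatch on it with strncmp(), and reject anything unknown. A self-contained sketch under hypothetical names (example_write is illustrative, not kmemleak code):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t example_write(struct file *file, const char __user *user_buf,
			     size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;

	/* copy at most one command, always NUL-terminated */
	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	if (strncmp(buf, "scan", 4) == 0)
		pr_info("example: scan requested\n");	/* stand-in action */
	else
		return -EINVAL;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

From userspace this kind of interface is exercised with e.g. "echo scan > /sys/kernel/debug/kmemleak", exactly as the updated documentation above describes.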
@@ -1340,11 +1316,9 @@ static int kmemleak_cleanup_thread(void *arg)
{
struct kmemleak_object *object;
mutex_lock(&kmemleak_mutex);
stop_scan_thread();
mutex_unlock(&kmemleak_mutex);
mutex_lock(&scan_mutex);
stop_scan_thread();
rcu_read_lock();
list_for_each_entry_rcu(object, &object_list, object_list)
delete_object(object->pointer);
@@ -1486,9 +1460,9 @@ static int __init kmemleak_late_init(void)
&kmemleak_fops);
if (!dentry)
pr_warning("Failed to create the debugfs kmemleak file\n");
mutex_lock(&kmemleak_mutex);
mutex_lock(&scan_mutex);
start_scan_thread();
mutex_unlock(&kmemleak_mutex);
mutex_unlock(&scan_mutex);
pr_info("Kernel memory leak detector initialized\n");