[PATCH] kretprobe instance recycled by parent process

When a kretprobe is registered on the schedule() function and a probed
process exits, that call to schedule() never returns, so the kretprobe
instances claimed by those calls are never recycled.

In this patch the parent process will recycle kretprobe instances of the
probed function, so kretprobe instances are no longer leaked.
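
For reference, a minimal sketch of the kind of module that runs into this
leak: a kretprobe registered on schedule().  This is illustrative only and
is not part of the patch; it uses the later symbol_name-based registration
for brevity (kernels of this era set .kp.addr directly), and the handler
body and maxactive value are arbitrary.

	#include <linux/module.h>
	#include <linux/kprobes.h>

	/*
	 * Runs each time a probed schedule() call returns.  A task that
	 * dies inside schedule() never gets here, which is the case this
	 * patch cleans up after.
	 */
	static int sched_ret_handler(struct kretprobe_instance *ri,
				     struct pt_regs *regs)
	{
		return 0;
	}

	static struct kretprobe sched_rp = {
		.kp.symbol_name	= "schedule",
		.handler	= sched_ret_handler,
		.maxactive	= 20,	/* preallocated kretprobe_instance slots */
	};

	static int __init sched_rp_init(void)
	{
		return register_kretprobe(&sched_rp);
	}

	static void __exit sched_rp_exit(void)
	{
		unregister_kretprobe(&sched_rp);
	}

	module_init(sched_rp_init);
	module_exit(sched_rp_exit);
	MODULE_LICENSE("GPL");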

Signed-off-by: bibo mao <bibo.mao@intel.com>
Cc: Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit c6fd91f0bd (parent c9becf58d9)
Author:    bibo mao, 2006-03-26 01:38:20 -08:00
Committer: Linus Torvalds

6 changed files with 14 additions and 32 deletions

diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -38,7 +38,6 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/random.h>
-#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -364,13 +363,6 @@ void exit_thread(void)
 	struct task_struct *tsk = current;
 	struct thread_struct *t = &tsk->thread;
 
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(tsk);
-
 	/* The process may have allocated an io port bitmap... nuke it. */
 	if (unlikely(NULL != t->io_bitmap_ptr)) {
 		int cpu = get_cpu();

diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -30,7 +30,6 @@
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-#include <linux/kprobes.h>
 
 #include <asm/cpu.h>
 #include <asm/delay.h>
@@ -738,13 +737,6 @@ void
 exit_thread (void)
 {
 
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(current);
-
 	ia64_drop_fpu(current);
 #ifdef CONFIG_PERFMON
 	/* if needed, stop monitoring and flush state to perfmon context */

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -35,7 +35,6 @@
 #include <linux/mqueue.h>
 #include <linux/hardirq.h>
 #include <linux/utsname.h>
-#include <linux/kprobes.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -460,7 +459,6 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
-	kprobe_flush_task(current);
 	discard_lazy_cpu_state();
 }
 

diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -35,8 +35,8 @@
 #include <linux/ptrace.h>
 #include <linux/utsname.h>
 #include <linux/random.h>
-#include <linux/kprobes.h>
 #include <linux/notifier.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -353,13 +353,6 @@ void exit_thread(void)
 	struct task_struct *me = current;
 	struct thread_struct *t = &me->thread;
 
-	/*
-	 * Remove function-return probe instances associated with this task
-	 * and put them back on the free list. Do not insert an exit probe for
-	 * this function, it will be disabled by kprobe_flush_task if you do.
-	 */
-	kprobe_flush_task(me);
-
 	if (me->thread.io_bitmap_ptr) {
 		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
 

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -323,10 +323,10 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
 }
 
 /*
- * This function is called from exit_thread or flush_thread when task tk's
- * stack is being recycled so that we can recycle any function-return probe
- * instances associated with this task. These left over instances represent
- * probed functions that have been called but will never return.
+ * This function is called from finish_task_switch when task tk becomes dead,
+ * so that we can recycle any function-return probe instances associated
+ * with this task. These left over instances represent probed functions
+ * that have been called but will never return.
 */
 void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
@@ -336,7 +336,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	unsigned long flags = 0;
 
 	spin_lock_irqsave(&kretprobe_lock, flags);
-	head = kretprobe_inst_table_head(current);
+	head = kretprobe_inst_table_head(tk);
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri);
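
Pieced together from the two hunks above, kprobe_flush_task() after this
change reads roughly as follows.  The local declarations and the final
unlock are not visible in the hunk and are filled in here only to make the
sketch read as a whole.  The point of the one-line change is that
finish_task_switch() runs in the context of the task being switched to, so
current is no longer the dying task; the hash bucket has to be looked up
with the tk argument instead.

	void __kprobes kprobe_flush_task(struct task_struct *tk)
	{
		struct kretprobe_instance *ri;
		struct hlist_head *head;
		struct hlist_node *node, *tmp;
		unsigned long flags = 0;

		/* Serialize against kretprobe handlers on other CPUs. */
		spin_lock_irqsave(&kretprobe_lock, flags);
		/* Buckets are hashed by task pointer; use the dead task, not current. */
		head = kretprobe_inst_table_head(tk);
		hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
			if (ri->task == tk)
				recycle_rp_inst(ri);
		}
		spin_unlock_irqrestore(&kretprobe_lock, flags);
	}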

diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -49,6 +49,7 @@
 #include <linux/syscalls.h>
 #include <linux/times.h>
 #include <linux/acct.h>
+#include <linux/kprobes.h>
 #include <asm/tlb.h>
 
 #include <asm/unistd.h>
@@ -1546,9 +1547,15 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)
 	finish_lock_switch(rq, prev);
 	if (mm)
 		mmdrop(mm);
-	if (unlikely(prev_task_flags & PF_DEAD))
+	if (unlikely(prev_task_flags & PF_DEAD)) {
+		/*
+		 * Remove function-return probe instances associated with this
+		 * task and put them back on the free list.
+		 */
+		kprobe_flush_task(prev);
 		put_task_struct(prev);
+	}
 }
 
 /**
  * schedule_tail - first thing a freshly forked thread must call.
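
The recycling step itself is untouched by this patch.  As a rough sketch
from memory of what recycle_rp_inst() does in kernels of this vintage (it
is not part of this diff, and details may differ), it unhooks the instance
from the per-task hash bucket and hands it back to its kretprobe's free
list, or frees it outright if the kretprobe is being unregistered:

	/* Called with kretprobe_lock held. */
	void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
	{
		/* Drop the instance from the per-task hash bucket. */
		hlist_del(&ri->hlist);
		if (ri->rp) {
			/* Move it from the used list back onto the free list. */
			hlist_del(&ri->uflist);
			INIT_HLIST_NODE(&ri->uflist);
			hlist_add_head(&ri->uflist, &ri->rp->free_instances);
		} else
			/* The kretprobe is going away; just free the instance. */
			kfree(ri);
	}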