kprobes: disable preempt for module_text_address() and kernel_text_address()

__register_kprobe() can be preempted after checking probing address but
before module_text_address() or try_module_get(), and in this interval
the module can be unloaded.  In that case, try_module_get(probed_mod)
will access an invalid address, or kprobe will probe an invalid address.

This patch uses preempt_disable() to protect it and uses
__module_text_address() and __kernel_text_address().

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Masami Hiramatsu 2008-11-12 13:26:51 -08:00 committed by Linus Torvalds
parent 3b1b3f6e57
commit a189d0350f

View file

@@ -613,30 +613,37 @@ static int __kprobes __register_kprobe(struct kprobe *p,
return -EINVAL; return -EINVAL;
p->addr = addr; p->addr = addr;
if (!kernel_text_address((unsigned long) p->addr) || preempt_disable();
in_kprobes_functions((unsigned long) p->addr)) if (!__kernel_text_address((unsigned long) p->addr) ||
in_kprobes_functions((unsigned long) p->addr)) {
preempt_enable();
return -EINVAL; return -EINVAL;
}
p->mod_refcounted = 0; p->mod_refcounted = 0;
/* /*
* Check if are we probing a module. * Check if are we probing a module.
*/ */
probed_mod = module_text_address((unsigned long) p->addr); probed_mod = __module_text_address((unsigned long) p->addr);
if (probed_mod) { if (probed_mod) {
struct module *calling_mod = module_text_address(called_from); struct module *calling_mod;
calling_mod = __module_text_address(called_from);
/* /*
* We must allow modules to probe themself and in this case * We must allow modules to probe themself and in this case
* avoid incrementing the module refcount, so as to allow * avoid incrementing the module refcount, so as to allow
* unloading of self probing modules. * unloading of self probing modules.
*/ */
if (calling_mod && calling_mod != probed_mod) { if (calling_mod && calling_mod != probed_mod) {
if (unlikely(!try_module_get(probed_mod))) if (unlikely(!try_module_get(probed_mod))) {
preempt_enable();
return -EINVAL; return -EINVAL;
}
p->mod_refcounted = 1; p->mod_refcounted = 1;
} else } else
probed_mod = NULL; probed_mod = NULL;
} }
preempt_enable();
p->nmissed = 0; p->nmissed = 0;
INIT_LIST_HEAD(&p->list); INIT_LIST_HEAD(&p->list);
@@ -718,6 +725,10 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
struct kprobe *old_p; struct kprobe *old_p;
if (p->mod_refcounted) { if (p->mod_refcounted) {
/*
* Since we've already incremented refcount,
* we don't need to disable preemption.
*/
mod = module_text_address((unsigned long)p->addr); mod = module_text_address((unsigned long)p->addr);
if (mod) if (mod)
module_put(mod); module_put(mod);