kprobes: Clean up insn_pages by using list instead of hlist

Use struct list_head instead of struct hlist_head/hlist_node for managing
insn_pages, because insn_pages doesn't use a hash table.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
LKML-Reference: <20090630210814.17851.64651.stgit@localhost.localdomain>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Masami Hiramatsu 2009-06-30 17:08:14 -04:00 committed by Ingo Molnar
parent 4a2bb6fcc8
commit c5cb5a2d8d

View file

@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
struct kprobe_insn_page { struct kprobe_insn_page {
struct hlist_node hlist; struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */ kprobe_opcode_t *insns; /* Page of instruction slots */
char slot_used[INSNS_PER_PAGE]; char slot_used[INSNS_PER_PAGE];
int nused; int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
}; };
static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages; static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots; static int kprobe_garbage_slots;
static int collect_garbage_slots(void); static int collect_garbage_slots(void);
@@ -152,10 +152,9 @@ loop_end:
static kprobe_opcode_t __kprobes *__get_insn_slot(void) static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{ {
struct kprobe_insn_page *kip; struct kprobe_insn_page *kip;
struct hlist_node *pos;
retry: retry:
hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->nused < INSNS_PER_PAGE) { if (kip->nused < INSNS_PER_PAGE) {
int i; int i;
for (i = 0; i < INSNS_PER_PAGE; i++) { for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
kfree(kip); kfree(kip);
return NULL; return NULL;
} }
INIT_HLIST_NODE(&kip->hlist); INIT_LIST_HEAD(&kip->list);
hlist_add_head(&kip->hlist, &kprobe_insn_pages); list_add(&kip->list, &kprobe_insn_pages);
memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE); memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
kip->slot_used[0] = SLOT_USED; kip->slot_used[0] = SLOT_USED;
kip->nused = 1; kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
* so as not to have to set it up again the * so as not to have to set it up again the
* next time somebody inserts a probe. * next time somebody inserts a probe.
*/ */
hlist_del(&kip->hlist); if (!list_is_singular(&kprobe_insn_pages)) {
if (hlist_empty(&kprobe_insn_pages)) { list_del(&kip->list);
INIT_HLIST_NODE(&kip->hlist);
hlist_add_head(&kip->hlist,
&kprobe_insn_pages);
} else {
module_free(NULL, kip->insns); module_free(NULL, kip->insns);
kfree(kip); kfree(kip);
} }
@@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
static int __kprobes collect_garbage_slots(void) static int __kprobes collect_garbage_slots(void)
{ {
struct kprobe_insn_page *kip; struct kprobe_insn_page *kip, *next;
struct hlist_node *pos, *next;
/* Ensure no-one is preempted on the garbages */ /* Ensure no-one is preempted on the garbages */
if (check_safety()) if (check_safety())
return -EAGAIN; return -EAGAIN;
hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
int i; int i;
if (kip->ngarbage == 0) if (kip->ngarbage == 0)
continue; continue;
@@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{ {
struct kprobe_insn_page *kip; struct kprobe_insn_page *kip;
struct hlist_node *pos;
mutex_lock(&kprobe_insn_mutex); mutex_lock(&kprobe_insn_mutex);
hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { list_for_each_entry(kip, &kprobe_insn_pages, list) {
if (kip->insns <= slot && if (kip->insns <= slot &&
slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
int i = (slot - kip->insns) / MAX_INSN_SIZE; int i = (slot - kip->insns) / MAX_INSN_SIZE;
if (dirty) { if (dirty) {
kip->slot_used[i] = SLOT_DIRTY; kip->slot_used[i] = SLOT_DIRTY;
kip->ngarbage++; kip->ngarbage++;
} else { } else
collect_one_slot(kip, i); collect_one_slot(kip, i);
}
break; break;
} }
} }