perf sched: Clean up PID sorting logic

Use a sort list for thread atoms insertion as well, instead of
hardcoding the comparison to PID.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Ingo Molnar 2009-09-11 12:12:54 +02:00
parent b1ffe8f3e0
commit b5fae128e4
2 changed files with 51 additions and 45 deletions

View file

@ -144,7 +144,7 @@ struct task_atoms {
u64 total_runtime; u64 total_runtime;
}; };
typedef int (*sort_thread_lat)(struct task_atoms *, struct task_atoms *); typedef int (*sort_fn_t)(struct task_atoms *, struct task_atoms *);
static struct rb_root atom_root, sorted_atom_root; static struct rb_root atom_root, sorted_atom_root;
@ -869,41 +869,22 @@ static struct trace_sched_handler replay_ops = {
.fork_event = replay_fork_event, .fork_event = replay_fork_event,
}; };
static struct task_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread)
{
struct rb_node *node = root->rb_node;
while (node) {
struct task_atoms *atoms;
atoms = container_of(node, struct task_atoms, node);
if (thread->pid > atoms->thread->pid)
node = node->rb_left;
else if (thread->pid < atoms->thread->pid)
node = node->rb_right;
else {
return atoms;
}
}
return NULL;
}
struct sort_dimension { struct sort_dimension {
const char *name; const char *name;
sort_thread_lat cmp; sort_fn_t cmp;
struct list_head list; struct list_head list;
}; };
static LIST_HEAD(cmp_pid); static LIST_HEAD(cmp_pid);
static int static int
thread_lat_cmp(struct list_head *list, struct task_atoms *l, thread_lat_cmp(struct list_head *list, struct task_atoms *l, struct task_atoms *r)
struct task_atoms *r)
{ {
struct sort_dimension *sort; struct sort_dimension *sort;
int ret = 0; int ret = 0;
BUG_ON(list_empty(list));
list_for_each_entry(sort, list, list) { list_for_each_entry(sort, list, list) {
ret = sort->cmp(l, r); ret = sort->cmp(l, r);
if (ret) if (ret)
@ -913,6 +894,32 @@ thread_lat_cmp(struct list_head *list, struct task_atoms *l,
return ret; return ret;
} }
static struct task_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
struct list_head *sort_list)
{
struct rb_node *node = root->rb_node;
struct task_atoms key = { .thread = thread };
while (node) {
struct task_atoms *atoms;
int cmp;
atoms = container_of(node, struct task_atoms, node);
cmp = thread_lat_cmp(sort_list, &key, atoms);
if (cmp > 0)
node = node->rb_left;
else if (cmp < 0)
node = node->rb_right;
else {
BUG_ON(thread != atoms->thread);
return atoms;
}
}
return NULL;
}
static void static void
__thread_latency_insert(struct rb_root *root, struct task_atoms *data, __thread_latency_insert(struct rb_root *root, struct task_atoms *data,
struct list_head *sort_list) struct list_head *sort_list)
@ -1049,18 +1056,18 @@ latency_switch_event(struct trace_switch_event *switch_event,
sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match); sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match); sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
in_atoms = thread_atoms_search(&atom_root, sched_in); in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
if (!in_atoms) { if (!in_atoms) {
thread_atoms_insert(sched_in); thread_atoms_insert(sched_in);
in_atoms = thread_atoms_search(&atom_root, sched_in); in_atoms = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
if (!in_atoms) if (!in_atoms)
die("in-atom: Internal tree error"); die("in-atom: Internal tree error");
} }
out_atoms = thread_atoms_search(&atom_root, sched_out); out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_atoms) { if (!out_atoms) {
thread_atoms_insert(sched_out); thread_atoms_insert(sched_out);
out_atoms = thread_atoms_search(&atom_root, sched_out); out_atoms = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
if (!out_atoms) if (!out_atoms)
die("out-atom: Internal tree error"); die("out-atom: Internal tree error");
} }
@ -1085,7 +1092,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
return; return;
wakee = threads__findnew(wakeup_event->pid, &threads, &last_match); wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
atoms = thread_atoms_search(&atom_root, wakee); atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
if (!atoms) { if (!atoms) {
thread_atoms_insert(wakee); thread_atoms_insert(wakee);
return; return;
@ -1136,7 +1143,6 @@ static void output_lat_thread(struct task_atoms *atom_list)
static int pid_cmp(struct task_atoms *l, struct task_atoms *r) static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
{ {
if (l->thread->pid < r->thread->pid) if (l->thread->pid < r->thread->pid)
return -1; return -1;
if (l->thread->pid > r->thread->pid) if (l->thread->pid > r->thread->pid)
@ -1146,8 +1152,8 @@ static int pid_cmp(struct task_atoms *l, struct task_atoms *r)
} }
static struct sort_dimension pid_sort_dimension = { static struct sort_dimension pid_sort_dimension = {
.name = "pid", .name = "pid",
.cmp = pid_cmp, .cmp = pid_cmp,
}; };
static int avg_cmp(struct task_atoms *l, struct task_atoms *r) static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
@ -1172,8 +1178,8 @@ static int avg_cmp(struct task_atoms *l, struct task_atoms *r)
} }
static struct sort_dimension avg_sort_dimension = { static struct sort_dimension avg_sort_dimension = {
.name = "avg", .name = "avg",
.cmp = avg_cmp, .cmp = avg_cmp,
}; };
static int max_cmp(struct task_atoms *l, struct task_atoms *r) static int max_cmp(struct task_atoms *l, struct task_atoms *r)
@ -1187,8 +1193,8 @@ static int max_cmp(struct task_atoms *l, struct task_atoms *r)
} }
static struct sort_dimension max_sort_dimension = { static struct sort_dimension max_sort_dimension = {
.name = "max", .name = "max",
.cmp = max_cmp, .cmp = max_cmp,
}; };
static int switch_cmp(struct task_atoms *l, struct task_atoms *r) static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
@ -1202,8 +1208,8 @@ static int switch_cmp(struct task_atoms *l, struct task_atoms *r)
} }
static struct sort_dimension switch_sort_dimension = { static struct sort_dimension switch_sort_dimension = {
.name = "switch", .name = "switch",
.cmp = switch_cmp, .cmp = switch_cmp,
}; };
static int runtime_cmp(struct task_atoms *l, struct task_atoms *r) static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
@ -1217,8 +1223,8 @@ static int runtime_cmp(struct task_atoms *l, struct task_atoms *r)
} }
static struct sort_dimension runtime_sort_dimension = { static struct sort_dimension runtime_sort_dimension = {
.name = "runtime", .name = "runtime",
.cmp = runtime_cmp, .cmp = runtime_cmp,
}; };
static struct sort_dimension *available_sorts[] = { static struct sort_dimension *available_sorts[] = {
@ -1666,8 +1672,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used)
argc = parse_options(argc, argv, latency_options, latency_usage, 0); argc = parse_options(argc, argv, latency_options, latency_usage, 0);
if (argc) if (argc)
usage_with_options(latency_usage, latency_options); usage_with_options(latency_usage, latency_options);
setup_sorting();
} }
setup_sorting();
__cmd_lat(); __cmd_lat();
} else if (!strncmp(argv[0], "rep", 3)) { } else if (!strncmp(argv[0], "rep", 3)) {
trace_handler = &replay_ops; trace_handler = &replay_ops;

View file

@ -4,10 +4,10 @@
#include "symbol.h" #include "symbol.h"
struct thread { struct thread {
struct rb_node rb_node; struct rb_node rb_node;
struct list_head maps; struct list_head maps;
pid_t pid; pid_t pid;
char *comm; char *comm;
}; };
int thread__set_comm(struct thread *self, const char *comm); int thread__set_comm(struct thread *self, const char *comm);