2009-08-14 10:21:53 +00:00
|
|
|
#include "../perf.h"
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include "thread.h"
|
|
|
|
#include "util.h"
|
2009-08-18 15:04:03 +00:00
|
|
|
#include "debug.h"
|
2009-08-14 10:21:53 +00:00
|
|
|
|
2009-10-13 14:16:29 +00:00
|
|
|
/* Global rb-tree of every thread seen so far, keyed by pid. */
static struct rb_root threads;
/* Most recently matched thread; front-end cache for threads__findnew(). */
static struct thread *last_match;
|
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
/*
 * Put a thread into a pristine state: record its pid, clear the comm and
 * empty both the live and removed map collections for every map type.
 */
void thread__init(struct thread *self, pid_t pid)
{
	int type;

	self->pid = pid;
	self->comm = NULL;

	for (type = 0; type < MAP__NR_TYPES; type++) {
		self->maps[type] = RB_ROOT;
		INIT_LIST_HEAD(&self->removed_maps[type]);
	}
}
|
|
|
|
|
2009-10-08 19:04:17 +00:00
|
|
|
/*
 * Allocate and initialize a new thread for @pid.
 *
 * The comm starts out as the placeholder ":<pid>" until a real comm event
 * is processed; if allocating that placeholder fails the thread is still
 * returned, just with a NULL comm.
 *
 * Returns NULL only when the thread itself cannot be allocated.
 */
static struct thread *thread__new(pid_t pid)
{
	/* One size constant so the allocation and the format bound agree. */
	const size_t comm_size = 32;
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		thread__init(self, pid);
		self->comm = malloc(comm_size);
		if (self->comm)
			snprintf(self->comm, comm_size, ":%d", self->pid);
	}

	return self;
}
|
|
|
|
|
|
|
|
int thread__set_comm(struct thread *self, const char *comm)
|
|
|
|
{
|
|
|
|
if (self->comm)
|
|
|
|
free(self->comm);
|
|
|
|
self->comm = strdup(comm);
|
|
|
|
return self->comm ? 0 : -ENOMEM;
|
|
|
|
}
|
|
|
|
|
perf tools: Bind callchains to the first sort dimension column
Currently, the callchains are displayed using a constant left
margin. So depending on the current sort dimension
configuration, callchains may appear to be well attached to the
first sort dimension column field which is mostly the case,
except when the first dimension of sorting is done by comm,
because these are right aligned.
This patch binds the callchain to the first letter in the first
column, whatever type of column it is (dso, comm, symbol).
Before:
0.80% perf [k] __lock_acquire
__lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
| | __fsnotify_parent
After:
0.80% perf [k] __lock_acquire
__lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
| | __fsnotify_parent
Also, for clarity, we don't put anymore the callchain as is but:
- If we have a top level ancestor in the callchain, start it
with a first ascii hook.
Before:
0.80% perf [kernel] [k] __lock_acquire
__lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
[..] [..]
After:
0.80% perf [kernel] [k] __lock_acquire
|
--- __lock_acquire
lock_acquire
|
|--58.33%-- _spin_lock
| |
| |--28.57%-- inotify_should_send_event
| | fsnotify
[..] [..]
- Otherwise, if we have several top level ancestors, then
display these like we did before:
1.69% Xorg
|
|--21.21%-- vread_hpet
| 0x7fffd85b46fc
| 0x7fffd85b494d
| 0x7f4fafb4e54d
|
|--15.15%-- exaOffscreenAlloc
|
|--9.09%-- I830WaitLpRing
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Anton Blanchard <anton@samba.org>
LKML-Reference: <1256246604-17156-2-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-10-22 21:23:23 +00:00
|
|
|
int thread__comm_len(struct thread *self)
|
|
|
|
{
|
|
|
|
if (!self->comm_len) {
|
|
|
|
if (!self->comm)
|
|
|
|
return 0;
|
|
|
|
self->comm_len = strlen(self->comm);
|
|
|
|
}
|
|
|
|
|
|
|
|
return self->comm_len;
|
|
|
|
}
|
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
/*
 * Human-readable heading printed per map type by __thread__fprintf_maps().
 * Only MAP__FUNCTION is named so far; other slots print as "(null)".
 */
static const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
};
|
|
|
|
|
|
|
|
static size_t __thread__fprintf_maps(struct thread *self,
|
|
|
|
enum map_type type, FILE *fp)
|
2009-08-14 10:21:53 +00:00
|
|
|
{
|
2009-11-27 18:29:20 +00:00
|
|
|
size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
|
2009-09-28 17:48:46 +00:00
|
|
|
struct rb_node *nd;
|
2009-08-14 10:21:53 +00:00
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) {
|
|
|
|
struct map *pos = rb_entry(nd, struct map, rb_node);
|
|
|
|
printed += fprintf(fp, "Map:");
|
|
|
|
printed += map__fprintf(pos, fp);
|
|
|
|
if (verbose > 1) {
|
|
|
|
printed += dso__fprintf(pos->dso, type, fp);
|
|
|
|
printed += fprintf(fp, "--\n");
|
|
|
|
}
|
2009-09-28 17:48:46 +00:00
|
|
|
}
|
2009-08-14 10:21:53 +00:00
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
return printed;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t thread__fprintf_maps(struct thread *self, FILE *fp)
|
|
|
|
{
|
|
|
|
size_t printed = 0, i;
|
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i)
|
|
|
|
printed += __thread__fprintf_maps(self, i, fp);
|
|
|
|
return printed;
|
|
|
|
}
|
2009-10-02 06:29:58 +00:00
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
static size_t __thread__fprintf_removed_maps(struct thread *self,
|
|
|
|
enum map_type type, FILE *fp)
|
|
|
|
{
|
|
|
|
struct map *pos;
|
|
|
|
size_t printed = 0;
|
|
|
|
|
|
|
|
list_for_each_entry(pos, &self->removed_maps[type], node) {
|
|
|
|
printed += fprintf(fp, "Map:");
|
|
|
|
printed += map__fprintf(pos, fp);
|
|
|
|
if (verbose > 1) {
|
|
|
|
printed += dso__fprintf(pos->dso, type, fp);
|
|
|
|
printed += fprintf(fp, "--\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return printed;
|
|
|
|
}
|
2009-10-02 06:29:58 +00:00
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
static size_t thread__fprintf_removed_maps(struct thread *self, FILE *fp)
|
|
|
|
{
|
|
|
|
size_t printed = 0, i;
|
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i)
|
|
|
|
printed += __thread__fprintf_removed_maps(self, i, fp);
|
|
|
|
return printed;
|
|
|
|
}
|
|
|
|
|
|
|
|
static size_t thread__fprintf(struct thread *self, FILE *fp)
|
|
|
|
{
|
|
|
|
size_t printed = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
|
|
|
|
printed += thread__fprintf_removed_maps(self, fp);
|
|
|
|
printed += fprintf(fp, "Removed maps:\n");
|
|
|
|
return printed + thread__fprintf_removed_maps(self, fp);
|
2009-08-14 10:21:53 +00:00
|
|
|
}
|
|
|
|
|
2009-10-13 14:16:29 +00:00
|
|
|
/*
 * Find the thread with @pid in the global rb-tree, creating and inserting
 * a new one if it is not there yet.  Returns NULL only when a new thread
 * is needed but cannot be allocated.
 */
struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	/* Standard rb-tree descent, keyed on pid. */
	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	/* Not found: create it and link it at the leaf found by the descent. */
	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}
|
|
|
|
|
2009-10-13 14:16:29 +00:00
|
|
|
/*
 * Ensure pid 0 (the idle/swapper task) exists in the threads tree with its
 * canonical comm.  Failure here is fatal: without the idle thread samples
 * cannot be attributed, so bail out immediately.
 */
struct thread *register_idle_thread(void)
{
	struct thread *idle = threads__findnew(0);

	if (idle == NULL || thread__set_comm(idle, "swapper") != 0) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}

	return idle;
}
|
|
|
|
|
2009-09-28 17:48:46 +00:00
|
|
|
/*
 * Evict from @self every existing map of the same type whose address range
 * overlaps @map, making room before @map is inserted.
 *
 * Evicted maps are not freed: other objects may still reference them, so
 * they are parked on the per-type removed_maps list instead.
 */
static void thread__remove_overlappings(struct thread *self, struct map *map)
{
	struct rb_root *root = &self->maps[map->type];
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		/* advance before a possible rb_erase() invalidates pos */
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", stderr);
			map__fprintf(map, stderr);
			map__fprintf(pos, stderr);
		}

		rb_erase(&pos->rb_node, root);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps[map->type]);
	}
}
|
|
|
|
|
|
|
|
void maps__insert(struct rb_root *maps, struct map *map)
|
|
|
|
{
|
|
|
|
struct rb_node **p = &maps->rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
const u64 ip = map->start;
|
|
|
|
struct map *m;
|
|
|
|
|
|
|
|
while (*p != NULL) {
|
|
|
|
parent = *p;
|
|
|
|
m = rb_entry(parent, struct map, rb_node);
|
|
|
|
if (ip < m->start)
|
|
|
|
p = &(*p)->rb_left;
|
|
|
|
else
|
|
|
|
p = &(*p)->rb_right;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&map->rb_node, parent, p);
|
|
|
|
rb_insert_color(&map->rb_node, maps);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Look up the map in @maps whose [start, end] range contains @ip.
 * Returns NULL when no map covers that address.
 *
 * A read-only descent needs neither the parent pointer nor the
 * pointer-to-link indirection an insert would; walk plain nodes.
 */
struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node *nd = maps->rb_node;

	while (nd != NULL) {
		struct map *m = rb_entry(nd, struct map, rb_node);

		if (ip < m->start)
			nd = nd->rb_left;
		else if (ip > m->end)
			nd = nd->rb_right;
		else
			return m;
	}

	return NULL;
}
|
2009-08-14 10:21:53 +00:00
|
|
|
|
2009-09-28 17:48:46 +00:00
|
|
|
/*
 * Add @map to @self's map tree for its type, first evicting any existing
 * maps whose address ranges overlap it.
 */
void thread__insert_map(struct thread *self, struct map *map)
{
	thread__remove_overlappings(self, map);
	maps__insert(&self->maps[map->type], map);
}
|
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
static int thread__clone_maps(struct thread *self, struct thread *parent,
|
|
|
|
enum map_type type)
|
2009-08-14 10:21:53 +00:00
|
|
|
{
|
2009-09-28 17:48:46 +00:00
|
|
|
struct rb_node *nd;
|
2009-11-27 18:29:20 +00:00
|
|
|
for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
|
|
|
|
struct map *map = rb_entry(nd, struct map, rb_node);
|
|
|
|
struct map *new = map__clone(map);
|
|
|
|
if (new == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
thread__insert_map(self, new);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int thread__fork(struct thread *self, struct thread *parent)
|
|
|
|
{
|
|
|
|
int i;
|
2009-08-14 10:21:53 +00:00
|
|
|
|
|
|
|
if (self->comm)
|
|
|
|
free(self->comm);
|
|
|
|
self->comm = strdup(parent->comm);
|
|
|
|
if (!self->comm)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2009-11-27 18:29:20 +00:00
|
|
|
for (i = 0; i < MAP__NR_TYPES; ++i)
|
|
|
|
if (thread__clone_maps(self, parent, i) < 0)
|
2009-08-14 10:21:53 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-10-13 14:16:29 +00:00
|
|
|
size_t threads__fprintf(FILE *fp)
|
2009-08-14 10:21:53 +00:00
|
|
|
{
|
|
|
|
size_t ret = 0;
|
|
|
|
struct rb_node *nd;
|
|
|
|
|
2009-10-13 14:16:29 +00:00
|
|
|
for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
|
2009-08-14 10:21:53 +00:00
|
|
|
struct thread *pos = rb_entry(nd, struct thread, rb_node);
|
|
|
|
|
|
|
|
ret += thread__fprintf(pos, fp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
perf tools: Consolidate symbol resolving across all tools
Now we have a very high level routine for simple tools to
process IP sample events:
int event__preprocess_sample(const event_t *self,
struct addr_location *al,
symbol_filter_t filter)
It receives the event itself and will insert new threads in the
global threads list and resolve the map and symbol, filling all
this info into the new addr_location struct, so that tools like
annotate and report can further process the event by creating
hist_entries in their specific way (with or without callgraphs,
etc).
It in turn uses the new next layer function:
void thread__find_addr_location(struct thread *self, u8 cpumode,
enum map_type type, u64 addr,
struct addr_location *al,
symbol_filter_t filter)
This one will, given a thread (userspace or the kernel kthread
one), will find the given type (MAP__FUNCTION now, MAP__VARIABLE
too in the near future) at the given cpumode, taking vdsos into
account (userspace hit, but kernel symbol) and will fill all
these details in the addr_location given.
Tools that need a more compact API for plain function
resolution, like 'kmem', can use this other one:
struct symbol *thread__find_function(struct thread *self, u64 addr,
symbol_filter_t filter)
So, to resolve a kernel symbol, that is all the 'kmem' tool
needs, its just a matter of calling:
sym = thread__find_function(kthread, addr, NULL);
The 'filter' parameter is needed because we do lazy
parsing/loading of ELF symtabs or /proc/kallsyms.
With this we remove more code duplication all around, which is
always good, huh? :-)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: John Kacur <jkacur@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1259346563-12568-12-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-11-27 18:29:23 +00:00
|
|
|
|
|
|
|
struct symbol *thread__find_symbol(struct thread *self,
|
|
|
|
enum map_type type, u64 addr,
|
|
|
|
symbol_filter_t filter)
|
|
|
|
{
|
|
|
|
struct map *map = thread__find_map(self, type, addr);
|
|
|
|
|
|
|
|
if (map != NULL)
|
|
|
|
return map__find_symbol(map, map->map_ip(map, addr), filter);
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|