tracing: Fix function graph trace_pipe to properly display failed entries

There is a case where the function graph tracer might get confused and
omit a single record from its output. This mostly affects trace_pipe,
since the trace_seq buffer is unlikely to overflow when reading the
trace file.

The function_graph tracer walks the trace entries while keeping a
pointer to the current record:

current ->  func1 ENTRY
            func2 ENTRY
            func2 RETURN
            func1 RETURN

When a function ENTRY is encountered, the tracer moves the pointer to
the next entry to check whether the function is a nested call or a
leaf (a simplified sketch of this check follows the diagram below).

            func1 ENTRY
current ->  func2 ENTRY
            func2 RETURN
            func1 RETURN
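
To make the peek-ahead concrete, here is a minimal userspace sketch of
the leaf check: compare the current ENTRY against the record that
follows it. All types and names are illustrative stand-ins, not the
kernel's structures:

  #include <stdio.h>
  #include <string.h>

  /* Illustrative stand-ins for the graph trace records. */
  enum rec_type { GRAPH_ENT, GRAPH_RET };

  struct rec {
      enum rec_type type;
      const char *func;
  };

  /*
   * Peek at the record after 'cur': if it is the RETURN of the same
   * function, 'cur' is a leaf call; otherwise it is a nested entry.
   */
  static int is_leaf(const struct rec *r, int cur, int len)
  {
      return cur + 1 < len &&
             r[cur + 1].type == GRAPH_RET &&
             strcmp(r[cur + 1].func, r[cur].func) == 0;
  }

  int main(void)
  {
      const struct rec trace[] = {
          { GRAPH_ENT, "func1" },
          { GRAPH_ENT, "func2" },
          { GRAPH_RET, "func2" },
          { GRAPH_RET, "func1" },
      };

      printf("func1 leaf? %d\n", is_leaf(trace, 0, 4)); /* 0: nested */
      printf("func2 leaf? %d\n", is_leaf(trace, 1, 4)); /* 1: leaf */
      return 0;
  }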

If writing out the rest of the function fills the trace_seq buffer,
the trace_pipe read will ignore this entry. The next read will then
start at the current location, but the first entry (func1) will have
been discarded.

This patch keeps a copy of the current entry in the iterator's private
storage and tracks when the trace_seq buffer fills. When the buffer
fills, the copy of the entry is reused in the next iteration.
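
The recovery pattern is easier to see outside the kernel. Below is a
minimal userspace sketch of the same idea: preserve a copy of the entry
before writing, flag the failure when the output buffer fills, and
replay the copy on the next iteration. All names here (seq_buf,
print_entry, and so on) are illustrative stand-ins, not the kernel API:

  #include <stdio.h>
  #include <string.h>

  /* Toy stand-ins: a bounded output buffer plus the saved state. */
  struct seq_buf { char data[16]; size_t len; };

  struct state {
      char ent[32];   /* copy of the entry being printed */
      int failed;     /* buffer filled before the entry went out */
  };

  static int print_entry(struct seq_buf *s, struct state *st,
                         const char *rec)
  {
      /* On a retry, fall back to the preserved copy. */
      const char *src = st->failed ? st->ent : rec;
      size_t n = strlen(src);

      if (!st->failed)
          snprintf(st->ent, sizeof(st->ent), "%s", src);

      if (s->len + n > sizeof(s->data)) {
          st->failed = 1;   /* entry preserved, replay it later */
          return -1;
      }
      memcpy(s->data + s->len, src, n);
      s->len += n;
      st->failed = 0;
      return 0;
  }

  int main(void)
  {
      struct seq_buf s = { .len = 0 };
      struct state st = { .failed = 0 };
      const char *recs[] = { "func1() {", "  func2();", "}" };
      size_t i = 0;

      while (i < 3) {
          if (print_entry(&s, &st, recs[i]) < 0) {
              /* Drain the buffer, as a trace_pipe read would. */
              printf("%.*s\n", (int)s.len, s.data);
              s.len = 0;
              continue;   /* replay the preserved entry */
          }
          i++;
      }
      printf("%.*s\n", (int)s.len, s.data);
      return 0;
  }

Without the preserved copy, the failed write would simply drop the
"  func2();" record, which is exactly the symptom this patch fixes.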

[
  This patch has been largely modified by Steven Rostedt in order to
  clean it up and simplify it. The original idea and concept were from
  Jirka, and for that, this patch will go under his name to give him
  the credit he deserves. But because this was modified by Steven
  Rostedt, anything wrong with the patch should be blamed on Steven.
]

Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1259067458-27143-1-git-send-email-jolsa@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,9 +14,20 @@
 #include "trace.h"
 #include "trace_output.h"
 
-struct fgraph_data {
+struct fgraph_cpu_data {
 	pid_t		last_pid;
 	int		depth;
+	int		ignore;
+};
+
+struct fgraph_data {
+	struct fgraph_cpu_data	*cpu_data;
+
+	/* Place to preserve last processed entry. */
+	struct ftrace_graph_ent_entry	ent;
+	struct ftrace_graph_ret_entry	ret;
+	int				failed;
+	int				cpu;
 };
 
 #define TRACE_GRAPH_INDENT	2
@@ -384,7 +395,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
+	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -435,27 +446,50 @@
 get_return_for_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *curr)
 {
-	struct ring_buffer_iter *ring_iter;
+	struct fgraph_data *data = iter->private;
+	struct ring_buffer_iter *ring_iter = NULL;
 	struct ring_buffer_event *event;
 	struct ftrace_graph_ret_entry *next;
 
-	ring_iter = iter->buffer_iter[iter->cpu];
+	/*
+	 * If the previous output failed to write to the seq buffer,
+	 * then we just reuse the data from before.
+	 */
+	if (data && data->failed) {
+		curr = &data->ent;
+		next = &data->ret;
+	} else {
 
-	/* First peek to compare current entry and the next one */
-	if (ring_iter)
-		event = ring_buffer_iter_peek(ring_iter, NULL);
-	else {
-		/* We need to consume the current entry to see the next one */
-		ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-		event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-					NULL);
+		ring_iter = iter->buffer_iter[iter->cpu];
+
+		/* First peek to compare current entry and the next one */
+		if (ring_iter)
+			event = ring_buffer_iter_peek(ring_iter, NULL);
+		else {
+			/*
+			 * We need to consume the current entry to see
+			 * the next one.
+			 */
+			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+						 NULL);
+		}
+
+		if (!event)
+			return NULL;
+
+		next = ring_buffer_event_data(event);
+
+		if (data) {
+			/*
+			 * Save current and next entries for later reference
+			 * if the output fails.
+			 */
+			data->ent = *curr;
+			data->ret = *next;
+		}
 	}
 
-	if (!event)
-		return NULL;
-
-	next = ring_buffer_event_data(event);
-
 	if (next->ent.type != TRACE_GRAPH_RET)
 		return NULL;
 
@@ -640,7 +674,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
 	if (data) {
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
 		/*
 		 * Comments display at + 1 to depth. Since
@@ -688,7 +722,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
 
 	if (data) {
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
 		*depth = call->depth;
 	}
@@ -782,19 +816,34 @@
 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 			struct trace_iterator *iter)
 {
-	int cpu = iter->cpu;
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ent *call = &field->graph_ent;
 	struct ftrace_graph_ret_entry *leaf_ret;
+	static enum print_line_t ret;
+	int cpu = iter->cpu;
 
 	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
 		return TRACE_TYPE_PARTIAL_LINE;
 
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
-		return print_graph_entry_leaf(iter, field, leaf_ret, s);
+		ret = print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(iter, field, s, cpu);
+		ret = print_graph_entry_nested(iter, field, s, cpu);
 
+	if (data) {
+		/*
+		 * If we failed to write our output, then we need to make
+		 * note of it. Because we already consumed our entry.
+		 */
+		if (s->full) {
+			data->failed = 1;
+			data->cpu = cpu;
+		} else
+			data->failed = 0;
+	}
+
+	return ret;
 }
 
 static enum print_line_t
@@ -810,7 +859,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 
 	if (data) {
 		int cpu = iter->cpu;
-		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
 
 		/*
 		 * Comments display at + 1 to depth. This is the
@@ -873,7 +922,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 	int i;
 
 	if (data)
-		depth = per_cpu_ptr(data, iter->cpu)->depth;
+		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 
 	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;
@@ -941,8 +990,33 @@
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
+	struct ftrace_graph_ent_entry *field;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *s = &iter->seq;
+	int cpu = iter->cpu;
+	int ret;
+
+	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
+		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
+		return TRACE_TYPE_HANDLED;
+	}
+
+	/*
+	 * If the last output failed, there's a possibility we need
+	 * to print out the missing entry which would never go out.
+	 */
+	if (data && data->failed) {
+		field = &data->ent;
+		iter->cpu = data->cpu;
+		ret = print_graph_entry(field, s, iter);
+		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
+			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
+			ret = TRACE_TYPE_NO_CONSUME;
+		}
+		iter->cpu = cpu;
+		return ret;
+	}
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
@@ -952,7 +1026,7 @@ print_graph_function(struct trace_iterator *iter)
 		 * sizeof(struct ftrace_graph_ent_entry) is very small,
 		 * it can be safely saved at the stack.
 		 */
-		struct ftrace_graph_ent_entry *field, saved;
+		struct ftrace_graph_ent_entry saved;
 		trace_assign_type(field, entry);
 		saved = *field;
 		return print_graph_entry(&saved, s, iter);
@@ -1030,31 +1104,54 @@ static void print_graph_headers(struct seq_file *s)
 static void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
-	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
+	struct fgraph_data *data;
 	int cpu;
 
+	iter->private = NULL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
-		pr_warning("function graph tracer: not enough memory\n");
-	else
-		for_each_possible_cpu(cpu) {
-			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
-			int *depth = &(per_cpu_ptr(data, cpu)->depth);
-			*pid = -1;
-			*depth = 0;
-		}
+		goto out_err;
+
+	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+	if (!data->cpu_data)
+		goto out_err_free;
+
+	for_each_possible_cpu(cpu) {
+		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
+		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
+		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
+		*pid = -1;
+		*depth = 0;
+		*ignore = 0;
+	}
 
 	iter->private = data;
+
+	return;
+
+ out_err_free:
+	kfree(data);
+ out_err:
+	pr_warning("function graph tracer: not enough memory\n");
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
 {
-	free_percpu(iter->private);
+	struct fgraph_data *data = iter->private;
+
+	if (data) {
+		free_percpu(data->cpu_data);
+		kfree(data);
+	}
}
 
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
+	.pipe_open	= graph_trace_open,
 	.close		= graph_trace_close,
+	.pipe_close	= graph_trace_close,
 	.wait_pipe	= poll_wait_pipe,
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,