mirror of https://github.com/adulau/aha.git
synced 2024-12-28 19:56:18 +00:00
38697053fa
With the new ring buffer infrastructure in ftrace, I'm trying to make
ftrace a little more lightweight.

This patch converts a lot of the local_irq_save/restore into
preempt_disable/enable. The original preempt count in a lot of cases
has to be sent in as a parameter so that it can be recorded correctly.
Some places were recording it incorrectly before anyway.

This is also laying the groundwork to make ftrace a little bit more
reentrant, and to remove all locking. The function tracers must still
protect themselves from reentrancy.

Note: all the function tracers must be careful when using
preempt_disable. They must do the following:

  resched = need_resched();
  preempt_disable_notrace();
  [...]
  if (resched)
          preempt_enable_no_resched_notrace();
  else
          preempt_enable_notrace();

The reason is that if such a function traces schedule() itself, the
preempt_enable_notrace() would cause a schedule, which would lead us
into a recursive failure.

If we needed to reschedule before calling preempt_disable, we should
have already scheduled. Since we did not, we most likely should not
reschedule now and are probably inside a schedule function. If resched
was not set, we still need to catch the need_resched flag being set
while preemption was off, and the if case at the end catches that for
us.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
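To make the requirement concrete, here is a minimal sketch of a function tracer callback applying the pattern above. The callback name and the recording step are illustrative only and not part of this patch; the (ip, parent_ip) signature is the one ftrace function tracers use.

/* Illustrative sketch, not from this patch: a function tracer callback */
static void my_function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int resched;

        /*
         * If a reschedule was already pending, a schedule point before
         * this call would have handled it; remember that fact.
         */
        resched = need_resched();
        preempt_disable_notrace();

        /* ... record ip and parent_ip into the ring buffer ... */

        if (resched)
                /* we may be tracing schedule() itself: do not schedule here */
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}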
107 lines
2.2 KiB
C
/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *boot_trace;
static int trace_boot_enabled;


/* Should be started after do_pre_smp_initcalls() in init/main.c */
void start_boot_trace(void)
{
        trace_boot_enabled = 1;
}

void stop_boot_trace(struct trace_array *tr)
{
        trace_boot_enabled = 0;
}

static void boot_trace_init(struct trace_array *tr)
{
        int cpu;
        boot_trace = tr;

        trace_boot_enabled = 0;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);
}

static void boot_trace_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                start_boot_trace();
        else
                stop_boot_trace(tr);
}

static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        int ret;
        struct trace_entry *entry = iter->ent;
        struct trace_boot *field = (struct trace_boot *)entry;
        struct boot_trace *it = &field->initcall;
        struct trace_seq *s = &iter->seq;

        if (entry->type == TRACE_BOOT) {
                ret = trace_seq_printf(s, "%pF called from %i "
                                       "returned %d after %lld msecs\n",
                                       it->func, it->caller, it->result,
                                       it->duration);
                if (ret)
                        return TRACE_TYPE_HANDLED;
                else
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_UNHANDLED;
}

struct tracer boot_tracer __read_mostly =
{
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = stop_boot_trace,
        .ctrl_update    = boot_trace_ctrl_update,
        .print_line     = initcall_print_line,
};


void trace_boot(struct boot_trace *it)
{
        struct ring_buffer_event *event;
        struct trace_boot *entry;
        struct trace_array_cpu *data;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;

        if (!trace_boot_enabled)
                return;

        preempt_disable();
        data = tr->data[smp_processor_id()];

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT;
        entry->initcall = *it;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();

 out:
        preempt_enable();
}
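For context, a hedged sketch of how a boot-time caller (for example, the initcall loop in init/main.c) might feed trace_boot(). The layout of struct boot_trace is defined in trace.h and not shown here; this sketch assumes it carries the func, caller, result and duration fields that initcall_print_line() reads. The wrapper name is hypothetical.

/* Hypothetical caller, modeled loosely on do_one_initcall() */
static int __init do_one_traced_initcall(initcall_t fn)
{
        struct boot_trace it;
        ktime_t t0, t1;

        it.caller = task_pid_nr(current);
        it.func = fn;                   /* printed with %pF above */

        t0 = ktime_get();
        it.result = fn();
        t1 = ktime_get();
        it.duration = ktime_to_ms(ktime_sub(t1, t0));

        trace_boot(&it);                /* record into the ring buffer */
        return it.result;
}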