tracing/function-return-tracer: change the name into function-graph-tracer
Impact: cleanup

This patch renames the "return function tracer" to the function-graph-tracer, a more suitable name for a tracer that can retrieve the ordered call stack during the code flow.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 509dceef64
commit fb52607afc
17 changed files with 173 additions and 72 deletions
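
For orientation before the per-file hunks, a minimal sketch of an in-kernel client of the renamed API. my_graph_handler and my_client_init are hypothetical names, not part of the patch, and the sketch assumes built-in code (the symbols are not exported to modules); the callback type and the register/unregister prototypes are exactly the ones renamed in the include/linux/ftrace.h hunks below.

#include <linux/init.h>
#include <linux/ftrace.h>

/* Hypothetical callback: runs once per traced function return. */
static void my_graph_handler(struct ftrace_graph_ret *trace)
{
        /* see the field-level example after the include/linux/ftrace.h hunks */
}

static __init int my_client_init(void)
{
        /* this entry point was register_ftrace_return() before this commit */
        return register_ftrace_graph(my_graph_handler);
}
device_initcall(my_client_init);
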
arch/x86/Kconfig
@@ -29,7 +29,7 @@ config X86
         select HAVE_FTRACE_MCOUNT_RECORD
         select HAVE_DYNAMIC_FTRACE
         select HAVE_FUNCTION_TRACER
-        select HAVE_FUNCTION_RET_TRACER if X86_32
+        select HAVE_FUNCTION_GRAPH_TRACER if X86_32
         select HAVE_FUNCTION_TRACE_MCOUNT_TEST
         select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
         select HAVE_ARCH_KGDB if !X86_VOYAGER

arch/x86/include/asm/ftrace.h
@@ -28,7 +28,7 @@ struct dyn_arch_ftrace {
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef __ASSEMBLY__
 
@@ -51,6 +51,6 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 #endif /* __ASSEMBLY__ */
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif /* _ASM_X86_FTRACE_H */

arch/x86/kernel/Makefile
@@ -14,7 +14,7 @@ CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # Don't trace __switch_to() but let it for function tracer
 CFLAGS_REMOVE_process_32.o = -pg
 endif
@@ -70,7 +70,7 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
 obj-$(CONFIG_X86_IO_APIC) += io_apic.o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o

arch/x86/kernel/entry_32.S
@@ -1188,9 +1188,9 @@ ENTRY(mcount)
 
         cmpl $ftrace_stub, ftrace_trace_function
         jnz trace
-#ifdef CONFIG_FUNCTION_RET_TRACER
-        cmpl $ftrace_stub, ftrace_function_return
-        jnz ftrace_return_caller
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+        cmpl $ftrace_stub, ftrace_graph_function
+        jnz ftrace_graph_caller
 #endif
 .globl ftrace_stub
 ftrace_stub:
@@ -1215,8 +1215,8 @@ END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_TRACER */
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-ENTRY(ftrace_return_caller)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
         cmpl $0, function_trace_stop
         jne ftrace_stub
 
@@ -1230,7 +1230,7 @@ ENTRY(ftrace_return_caller)
         popl %ecx
         popl %eax
         ret
-END(ftrace_return_caller)
+END(ftrace_graph_caller)
 
 .globl return_to_handler
 return_to_handler:

arch/x86/kernel/ftrace.c
@@ -323,7 +323,7 @@ int __init ftrace_dyn_arch_init(void *data)
 }
 #endif
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 #ifndef CONFIG_DYNAMIC_FTRACE
 
@@ -389,11 +389,11 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
  */
 unsigned long ftrace_return_to_handler(void)
 {
-        struct ftrace_retfunc trace;
+        struct ftrace_graph_ret trace;
         pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
                                 &trace.overrun);
         trace.rettime = cpu_clock(raw_smp_processor_id());
-        ftrace_function_return(&trace);
+        ftrace_graph_function(&trace);
 
         return trace.ret;
 }
@@ -440,12 +440,12 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
         );
 
         if (WARN_ON(faulted)) {
-                unregister_ftrace_return();
+                unregister_ftrace_graph();
                 return;
         }
 
         if (WARN_ON(!__kernel_text_address(old))) {
-                unregister_ftrace_return();
+                unregister_ftrace_graph();
                 *parent = old;
                 return;
         }
@@ -456,4 +456,4 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
         *parent = old;
 }
 
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

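The x86 hunks above are the two halves of one mechanism: prepare_ftrace_return() parks the real return address on a per-task stack and substitutes return_to_handler (wired up in the entry_32.S hunks), and ftrace_return_to_handler() later pops the record, timestamps it, and hands a struct ftrace_graph_ret to the installed callback. A self-contained userspace sketch of that bookkeeping follows, with explicit entry/exit hooks standing in for the hijacked return address, which portable C cannot model; all names are hypothetical.

#include <stdio.h>
#include <time.h>

#define DEPTH 50                        /* mirrors FTRACE_RETFUNC_DEPTH */

struct ret_rec {
        const char *func;
        long long calltime;
};

static struct ret_rec ret_stack[DEPTH];
static int curr_ret = -1;               /* mirrors task_struct.curr_ret_stack */
static unsigned long overrun;

static long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Entry hook: the role prepare_ftrace_return() plays in the patch. */
static void push_return(const char *func)
{
        if (curr_ret >= DEPTH - 1) {    /* stack full: count the loss */
                overrun++;
                return;
        }
        curr_ret++;
        ret_stack[curr_ret].func = func;
        ret_stack[curr_ret].calltime = now_ns();
}

/* Exit hook: the role return_to_handler/ftrace_return_to_handler() plays. */
static void pop_return(void)
{
        struct ret_rec rec;

        if (curr_ret < 0)
                return;
        rec = ret_stack[curr_ret--];
        printf("%s took %lld ns (overruns: %lu)\n",
               rec.func, now_ns() - rec.calltime, overrun);
}

static void leaf(void)
{
        push_return("leaf");
        /* ... traced work ... */
        pop_return();
}

static void parent(void)
{
        push_return("parent");
        leaf();
        pop_return();
}

int main(void)
{
        parent();       /* prints leaf first, then parent: ordered returns */
        return 0;
}
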
include/linux/ftrace.h
@@ -115,8 +115,8 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
 extern void ftrace_call(void);
 extern void mcount_call(void);
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern void ftrace_return_caller(void);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
 #endif
 
 /**
@@ -315,7 +315,7 @@ ftrace_init_module(struct module *mod,
 /*
  * Structure that defines a return function trace.
  */
-struct ftrace_retfunc {
+struct ftrace_graph_ret {
         unsigned long ret; /* Return address */
         unsigned long func; /* Current function */
         unsigned long long calltime;
@@ -324,22 +324,22 @@ struct ftrace_retfunc {
         unsigned long overrun;
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of a callback handler of tracing return function */
-typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
+typedef void (*trace_function_graph_t)(struct ftrace_graph_ret *);
 
-extern int register_ftrace_return(trace_function_return_t func);
+extern int register_ftrace_graph(trace_function_graph_t func);
 /* The current handler in use */
-extern trace_function_return_t ftrace_function_return;
-extern void unregister_ftrace_return(void);
+extern trace_function_graph_t ftrace_graph_function;
+extern void unregister_ftrace_graph(void);
 
-extern void ftrace_retfunc_init_task(struct task_struct *t);
-extern void ftrace_retfunc_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
 #else
-static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
-static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
 #endif
 
 #endif /* _LINUX_FTRACE_H */

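The renamed structure keeps its fields. A hedged sketch of consuming one record follows; my_duration_cb is a hypothetical name, and since a real callback runs on every traced return and must stay cheap, the printk is for illustration only. The duration and overrun fields are used the same way the new trace_functions_graph.c file below uses them.

#include <linux/kernel.h>
#include <linux/ftrace.h>

static void my_duration_cb(struct ftrace_graph_ret *trace)
{
        unsigned long long ns = trace->rettime - trace->calltime;

        /* overrun counts records lost when the fixed-depth
         * (FTRACE_RETFUNC_DEPTH) per-task return stack filled up */
        printk(KERN_INFO "%pF took %llu ns (overruns: %lu)\n",
               (void *)trace->func, ns, trace->overrun);
}
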
include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
 #define _LINUX_FTRACE_IRQ_H
 
 
-#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_RET_TRACER)
+#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
 extern void ftrace_nmi_enter(void);
 extern void ftrace_nmi_exit(void);
 #else

include/linux/sched.h
@@ -1365,7 +1365,7 @@ struct task_struct {
         unsigned long default_timer_slack_ns;
 
         struct list_head *scm_work_list;
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
         /* Index of current stored adress in ret_stack */
         int curr_ret_stack;
         /* Stack of return addresses for return function tracing */

kernel/Makefile
@@ -21,7 +21,7 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
-ifdef CONFIG_FUNCTION_RET_TRACER
+ifdef CONFIG_FUNCTION_GRAPH_TRACER
 CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
 CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
 endif

kernel/fork.c
@@ -140,7 +140,7 @@ void free_task(struct task_struct *tsk)
         prop_local_destroy_single(&tsk->dirties);
         free_thread_info(tsk->stack);
         rt_mutex_debug_task_free(tsk);
-        ftrace_retfunc_exit_task(tsk);
+        ftrace_graph_exit_task(tsk);
         free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         total_forks++;
         spin_unlock(&current->sighand->siglock);
         write_unlock_irq(&tasklist_lock);
-        ftrace_retfunc_init_task(p);
+        ftrace_graph_init_task(p);
         proc_fork_connector(p);
         cgroup_post_fork(p);
         return p;

kernel/sched.c
@@ -5901,7 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
          * The idle tasks have their own, simple scheduling class:
          */
         idle->sched_class = &idle_sched_class;
-        ftrace_retfunc_init_task(idle);
+        ftrace_graph_init_task(idle);
 }
 
 /*

kernel/trace/Kconfig
@@ -12,7 +12,7 @@ config NOP_TRACER
 config HAVE_FUNCTION_TRACER
         bool
 
-config HAVE_FUNCTION_RET_TRACER
+config HAVE_FUNCTION_GRAPH_TRACER
         bool
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -63,15 +63,18 @@ config FUNCTION_TRACER
           (the bootup default), then the overhead of the instructions is very
           small and not measurable even in micro-benchmarks.
 
-config FUNCTION_RET_TRACER
-        bool "Kernel Function return Tracer"
-        depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+        bool "Kernel Function Graph Tracer"
+        depends on HAVE_FUNCTION_GRAPH_TRACER
         depends on FUNCTION_TRACER
         help
-          Enable the kernel to trace a function at its return.
-          It's first purpose is to trace the duration of functions.
-          This is done by setting the current return address on the thread
-          info structure of the current task.
+          Enable the kernel to trace a function at both its return
+          and its entry.
+          It's first purpose is to trace the duration of functions and
+          draw a call graph for each thread with some informations like
+          the return value.
+          This is done by setting the current return address on the current
+          task structure into a stack of calls.
 
 config IRQSOFF_TRACER
         bool "Interrupts-off Latency Tracer"

kernel/trace/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_BTS_TRACER) += trace_bts.o
 

kernel/trace/ftrace.c
@@ -395,11 +395,11 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
         unsigned long ip, fl;
         unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
         if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
                 ftrace_addr = (unsigned long)ftrace_caller;
         else
-                ftrace_addr = (unsigned long)ftrace_return_caller;
+                ftrace_addr = (unsigned long)ftrace_graph_caller;
 #else
         ftrace_addr = (unsigned long)ftrace_caller;
 #endif
@@ -1496,13 +1496,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
         return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 static atomic_t ftrace_retfunc_active;
 
 /* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-                        (trace_function_return_t)ftrace_stub;
+trace_function_graph_t ftrace_graph_function =
+                        (trace_function_graph_t)ftrace_stub;
 
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
@@ -1549,7 +1549,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
         struct ftrace_ret_stack **ret_stack_list;
         int ret;
@@ -1569,7 +1569,7 @@ static int start_return_tracing(void)
         return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_function_graph_t func)
 {
         int ret = 0;
 
@@ -1584,13 +1584,13 @@ int register_ftrace_return(trace_function_return_t func)
                 goto out;
         }
         atomic_inc(&ftrace_retfunc_active);
-        ret = start_return_tracing();
+        ret = start_graph_tracing();
         if (ret) {
                 atomic_dec(&ftrace_retfunc_active);
                 goto out;
         }
         ftrace_tracing_type = FTRACE_TYPE_RETURN;
-        ftrace_function_return = func;
+        ftrace_graph_function = func;
         ftrace_startup();
 
 out:
@@ -1598,12 +1598,12 @@ out:
         return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
         mutex_lock(&ftrace_sysctl_lock);
 
         atomic_dec(&ftrace_retfunc_active);
-        ftrace_function_return = (trace_function_return_t)ftrace_stub;
+        ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
         ftrace_shutdown();
         /* Restore normal tracing type */
         ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1612,7 +1612,7 @@ void unregister_ftrace_return(void)
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
         if (atomic_read(&ftrace_retfunc_active)) {
                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
@@ -1626,7 +1626,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
                 t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
         struct ftrace_ret_stack *ret_stack = t->ret_stack;
 

kernel/trace/trace.c
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_function_graph(struct trace_array *tr,
                                 struct trace_array_cpu *data,
-                                struct ftrace_retfunc *trace,
+                                struct ftrace_graph_ret *trace,
                                 unsigned long flags,
                                 int pc)
 {
         struct ring_buffer_event *event;
-        struct ftrace_ret_entry *entry;
+        struct ftrace_graph_entry *entry;
         unsigned long irq_flags;
 
         if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -1177,8 +1177,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         local_irq_restore(flags);
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_function_graph(struct ftrace_graph_ret *trace)
 {
         struct trace_array *tr = &global_trace;
         struct trace_array_cpu *data;
@@ -1193,12 +1193,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
         disabled = atomic_inc_return(&data->disabled);
         if (likely(disabled == 1)) {
                 pc = preempt_count();
-                __trace_function_return(tr, data, trace, flags, pc);
+                __trace_function_graph(tr, data, trace, flags, pc);
         }
         atomic_dec(&data->disabled);
         raw_local_irq_restore(flags);
 }
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 static struct ftrace_ops trace_ops __read_mostly =
 {
@@ -2001,7 +2001,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                 break;
         }
         case TRACE_FN_RET: {
-                return print_return_function(iter);
+                return print_graph_function(iter);
                 break;
         }
         case TRACE_BRANCH: {

kernel/trace/trace.h
@@ -57,7 +57,7 @@ struct ftrace_entry {
 };
 
 /* Function return entry */
-struct ftrace_ret_entry {
+struct ftrace_graph_entry {
         struct trace_entry ent;
         unsigned long ip;
         unsigned long parent_ip;
@@ -264,7 +264,7 @@ extern void __ftrace_bad_type(void);
                 IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
                 IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
                 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-                IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
+                IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
                 IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
                 __ftrace_bad_type(); \
         } while (0)
@@ -398,7 +398,7 @@ void trace_function(struct trace_array *tr,
                     unsigned long parent_ip,
                     unsigned long flags, int pc);
 void
-trace_function_return(struct ftrace_retfunc *trace);
+trace_function_graph(struct ftrace_graph_ret *trace);
 
 void trace_bts(struct trace_array *tr,
                unsigned long from,
@@ -489,11 +489,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 extern unsigned long trace_flags;
 
 /* Standard output formatting function used for function return traces */
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern enum print_line_t print_return_function(struct trace_iterator *iter);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
 #else
 static inline enum print_line_t
-print_return_function(struct trace_iterator *iter)
+print_graph_function(struct trace_iterator *iter)
 {
         return TRACE_TYPE_UNHANDLED;
 }

kernel/trace/trace_functions_graph.c (new file, 98 lines)
@@ -0,0 +1,98 @@
+/*
+ *
+ * Function graph tracer.
+ * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+
+#define TRACE_GRAPH_PRINT_OVERRUN 0x1
+static struct tracer_opt trace_opts[] = {
+        /* Display overruns or not */
+        { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+        { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+        .val = 0, /* Don't display overruns by default */
+        .opts = trace_opts
+};
+
+
+static int graph_trace_init(struct trace_array *tr)
+{
+        int cpu;
+        for_each_online_cpu(cpu)
+                tracing_reset(tr, cpu);
+
+        return register_ftrace_graph(&trace_function_graph);
+}
+
+static void graph_trace_reset(struct trace_array *tr)
+{
+        unregister_ftrace_graph();
+}
+
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+        struct trace_seq *s = &iter->seq;
+        struct trace_entry *entry = iter->ent;
+        struct ftrace_graph_entry *field;
+        int ret;
+
+        if (entry->type == TRACE_FN_RET) {
+                trace_assign_type(field, entry);
+                ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+
+                ret = seq_print_ip_sym(s, field->ip,
+                                        trace_flags & TRACE_ITER_SYM_MASK);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+
+                ret = trace_seq_printf(s, " (%llu ns)",
+                                        field->rettime - field->calltime);
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+
+                if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+                        ret = trace_seq_printf(s, " (Overruns: %lu)",
+                                                field->overrun);
+                        if (!ret)
+                                return TRACE_TYPE_PARTIAL_LINE;
+                }
+
+                ret = trace_seq_printf(s, "\n");
+                if (!ret)
+                        return TRACE_TYPE_PARTIAL_LINE;
+
+                return TRACE_TYPE_HANDLED;
+        }
+        return TRACE_TYPE_UNHANDLED;
+}
+
+static struct tracer graph_trace __read_mostly = {
+        .name = "function-graph",
+        .init = graph_trace_init,
+        .reset = graph_trace_reset,
+        .print_line = print_graph_function,
+        .flags = &tracer_flags,
+};
+
+static __init int init_graph_trace(void)
+{
+        return register_tracer(&graph_trace);
+}
+
+device_initcall(init_graph_trace);
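
Once registered, the tracer is selectable by the .name given in graph_trace above. A hedged userspace usage sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug; adjust the paths to your mount point.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");
        char line[256];
        int i;

        if (!f) {
                perror("current_tracer");
                return 1;
        }
        fputs("function-graph", f);     /* .name from graph_trace above */
        fclose(f);

        f = fopen("/sys/kernel/debug/tracing/trace", "r");
        if (!f) {
                perror("trace");
                return 1;
        }
        for (i = 0; i < 5 && fgets(line, sizeof(line), f); i++)
                fputs(line, stdout);    /* show a few formatted records */
        fclose(f);
        return 0;
}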