2008-05-12 19:20:42 +00:00
|
|
|
#ifndef _LINUX_FTRACE_H
|
|
|
|
#define _LINUX_FTRACE_H
|
|
|
|
|
|
|
|
#include <linux/linkage.h>
|
2008-05-16 08:41:53 +00:00
|
|
|
#include <linux/fs.h>
|
2008-10-02 19:00:07 +00:00
|
|
|
#include <linux/ktime.h>
|
2008-09-23 10:32:08 +00:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/types.h>
|
2008-12-09 21:14:27 +00:00
|
|
|
#include <linux/module.h>
|
2008-10-02 11:26:05 +00:00
|
|
|
#include <linux/kallsyms.h>
|
2008-12-03 20:36:57 +00:00
|
|
|
#include <linux/bitops.h>
|
2008-12-04 22:51:23 +00:00
|
|
|
#include <linux/sched.h>
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-10-06 23:06:12 +00:00
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
|
2008-10-02 15:45:47 +00:00
|
|
|
|
2008-05-12 19:20:43 +00:00
|
|
|
extern int ftrace_enabled;
|
|
|
|
extern int
|
|
|
|
ftrace_enable_sysctl(struct ctl_table *table, int write,
|
|
|
|
struct file *filp, void __user *buffer, size_t *lenp,
|
|
|
|
loff_t *ppos);
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
/*
 * Signature of a function-trace callback: invoked with the address of the
 * traced function (@ip) and the address of its caller (@parent_ip).
 */
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

/*
 * One registered tracing callback.  Registered ops are chained through
 * @next into a singly linked list (see the usage rules documented above
 * register_ftrace_function()).
 */
struct ftrace_ops {
	ftrace_func_t func;		/* callback to run for each traced call */
	struct ftrace_ops *next;	/* next ops on the registered list */
};
|
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
extern int function_trace_stop;
|
|
|
|
|
2008-11-16 05:02:06 +00:00
|
|
|
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;
|
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer.  Note this is an on/off
 * switch, it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}
|
|
|
|
|
|
|
|
/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop.  This does not enable
 * the function tracing if the function tracer is disabled.  This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly.  Never free an ftrace_ops or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

/* Default do-nothing callback installed while tracing is disabled. */
extern void ftrace_stub(unsigned long a0, unsigned long a1);
|
|
|
|
|
2008-10-06 23:06:12 +00:00
|
|
|
#else /* !CONFIG_FUNCTION_TRACER */
|
2008-05-12 19:20:42 +00:00
|
|
|
# define register_ftrace_function(ops) do { } while (0)
|
|
|
|
# define unregister_ftrace_function(ops) do { } while (0)
|
|
|
|
# define clear_ftrace_function(ops) do { } while (0)
|
2008-10-23 13:33:02 +00:00
|
|
|
/* No-op stubs used when the function tracer is not configured in. */
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
|
2008-10-06 23:06:12 +00:00
|
|
|
#endif /* CONFIG_FUNCTION_TRACER */
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-12-17 04:06:40 +00:00
|
|
|
#ifdef CONFIG_STACK_TRACER
|
|
|
|
extern int stack_tracer_enabled;
|
|
|
|
int
|
|
|
|
stack_trace_sysctl(struct ctl_table *table, int write,
|
|
|
|
struct file *file, void __user *buffer, size_t *lenp,
|
|
|
|
loff_t *ppos);
|
|
|
|
#endif
|
|
|
|
|
ftrace: dynamic enabling/disabling of function calls
This patch adds a feature to dynamically replace the ftrace code
with the jmps to allow a kernel with ftrace configured to run
as fast as it can without it configured.
The way this works, is on bootup (if ftrace is enabled), a ftrace
function is registered to record the instruction pointer of all
places that call the function.
Later, if there's still any code to patch, a kthread is awoken
(rate limited to at most once a second) that performs a stop_machine,
and replaces all the code that was called with a jmp over the call
to ftrace. It only replaces what was found the previous time. Typically
the system reaches equilibrium quickly after bootup and there's no code
patching needed at all.
e.g.
call ftrace /* 5 bytes */
is replaced with
jmp 3f /* jmp is 2 bytes and we jump 3 forward */
3:
When we want to enable ftrace for function tracing, the IP recording
is removed, and stop_machine is called again to replace all the locations
of that were recorded back to the call of ftrace. When it is disabled,
we replace the code back to the jmp.
Allocation is done by the kthread. If the ftrace recording function is
called, and we don't have any record slots available, then we simply
skip that call. Once a second a new page (if needed) is allocated for
recording new ftrace function calls. A large batch is allocated at
boot up to get most of the calls there.
Because we do this via stop_machine, we don't have to worry about another
CPU executing a ftrace call as we modify it. But we do need to worry
about NMI's so all functions that might be called via nmi must be
annotated with notrace_nmi. When this code is configured in, the NMI code
will not call notrace.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-12 19:20:42 +00:00
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE
|
2008-11-15 00:21:19 +00:00
|
|
|
/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
|
|
|
|
#include <asm/ftrace.h>
|
|
|
|
|
2008-05-12 19:20:43 +00:00
|
|
|
/*
 * Flag bits kept in dyn_ftrace.flags, describing the state of one
 * recorded mcount call site (free slot, patch failure, filter match,
 * enabled, converted, frozen).  Exact semantics live in the dynamic
 * ftrace core — see kernel/trace/ftrace.c.
 */
enum {
	FTRACE_FL_FREE = (1 << 0),
	FTRACE_FL_FAILED = (1 << 1),
	FTRACE_FL_FILTER = (1 << 2),
	FTRACE_FL_ENABLED = (1 << 3),
	FTRACE_FL_NOTRACE = (1 << 4),
	FTRACE_FL_CONVERTED = (1 << 5),
	FTRACE_FL_FROZEN = (1 << 6),
};
|
|
|
|
|
ftrace: dynamic enabling/disabling of function calls
This patch adds a feature to dynamically replace the ftrace code
with the jmps to allow a kernel with ftrace configured to run
as fast as it can without it configured.
The way this works, is on bootup (if ftrace is enabled), a ftrace
function is registered to record the instruction pointer of all
places that call the function.
Later, if there's still any code to patch, a kthread is awoken
(rate limited to at most once a second) that performs a stop_machine,
and replaces all the code that was called with a jmp over the call
to ftrace. It only replaces what was found the previous time. Typically
the system reaches equilibrium quickly after bootup and there's no code
patching needed at all.
e.g.
call ftrace /* 5 bytes */
is replaced with
jmp 3f /* jmp is 2 bytes and we jump 3 forward */
3:
When we want to enable ftrace for function tracing, the IP recording
is removed, and stop_machine is called again to replace all the locations
of that were recorded back to the call of ftrace. When it is disabled,
we replace the code back to the jmp.
Allocation is done by the kthread. If the ftrace recording function is
called, and we don't have any record slots available, then we simply
skip that call. Once a second a new page (if needed) is allocated for
recording new ftrace function calls. A large batch is allocated at
boot up to get most of the calls there.
Because we do this via stop_machine, we don't have to worry about another
CPU executing a ftrace call as we modify it. But we do need to worry
about NMI's so all functions that might be called via nmi must be
annotated with notrace_nmi. When this code is configured in, the NMI code
will not call notrace.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-12 19:20:42 +00:00
|
|
|
/*
 * One record per mcount call site, used by dynamic ftrace to track and
 * patch the site.
 */
struct dyn_ftrace {
	struct list_head list;		/* linkage on the records list */
	unsigned long ip; /* address of mcount call-site */
	unsigned long flags;		/* FTRACE_FL_* bits above */
	struct dyn_arch_ftrace arch;	/* arch-specific state (asm/ftrace.h) */
};
|
|
|
|
|
2008-05-12 19:20:44 +00:00
|
|
|
int ftrace_force_update(void);
|
2008-05-12 19:20:45 +00:00
|
|
|
void ftrace_set_filter(unsigned char *buf, int len, int reset);
|
2008-05-12 19:20:44 +00:00
|
|
|
|
ftrace: dynamic enabling/disabling of function calls
This patch adds a feature to dynamically replace the ftrace code
with the jmps to allow a kernel with ftrace configured to run
as fast as it can without it configured.
The way this works, is on bootup (if ftrace is enabled), a ftrace
function is registered to record the instruction pointer of all
places that call the function.
Later, if there's still any code to patch, a kthread is awoken
(rate limited to at most once a second) that performs a stop_machine,
and replaces all the code that was called with a jmp over the call
to ftrace. It only replaces what was found the previous time. Typically
the system reaches equilibrium quickly after bootup and there's no code
patching needed at all.
e.g.
call ftrace /* 5 bytes */
is replaced with
jmp 3f /* jmp is 2 bytes and we jump 3 forward */
3:
When we want to enable ftrace for function tracing, the IP recording
is removed, and stop_machine is called again to replace all the locations
of that were recorded back to the call of ftrace. When it is disabled,
we replace the code back to the jmp.
Allocation is done by the kthread. If the ftrace recording function is
called, and we don't have any record slots available, then we simply
skip that call. Once a second a new page (if needed) is allocated for
recording new ftrace function calls. A large batch is allocated at
boot up to get most of the calls there.
Because we do this via stop_machine, we don't have to worry about another
CPU executing a ftrace call as we modify it. But we do need to worry
about NMI's so all functions that might be called via nmi must be
annotated with notrace_nmi. When this code is configured in, the NMI code
will not call notrace.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2008-05-12 19:20:42 +00:00
|
|
|
/* defined in arch */
|
2008-05-12 19:20:43 +00:00
|
|
|
extern int ftrace_ip_converted(unsigned long ip);
|
2008-05-12 19:20:43 +00:00
|
|
|
extern int ftrace_dyn_arch_init(void *data);
|
|
|
|
extern int ftrace_update_ftrace_func(ftrace_func_t func);
|
|
|
|
extern void ftrace_caller(void);
|
|
|
|
extern void ftrace_call(void);
|
|
|
|
extern void mcount_call(void);
|
2009-01-09 03:29:42 +00:00
|
|
|
|
|
|
|
#ifndef FTRACE_ADDR
|
|
|
|
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
|
|
|
|
#endif
|
2008-11-25 20:07:04 +00:00
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
extern void ftrace_graph_caller(void);
|
2008-11-26 05:16:24 +00:00
|
|
|
extern int ftrace_enable_ftrace_graph_caller(void);
|
|
|
|
extern int ftrace_disable_ftrace_graph_caller(void);
|
|
|
|
#else
|
|
|
|
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
|
|
|
|
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
|
2008-11-16 05:02:06 +00:00
|
|
|
#endif
|
ftrace: user update and disable dynamic ftrace daemon
In dynamic ftrace, the mcount function starts off pointing to a stub
function that just returns.
On start up, the call to the stub is modified to point to a "record_ip"
function. The job of the record_ip function is to add the function to
a pre-allocated hash list. If the function is already there, it simply is
ignored, otherwise it is added to the list.
Later, a ftraced daemon wakes up and calls kstop_machine if any functions
have been recorded, and changes the calls to the recorded functions to
a simple nop. If no functions were recorded, the daemon goes back to sleep.
The daemon wakes up once a second to see if it needs to update any newly
recorded functions into nops. Usually it does not, but if a lot of code
has been executed for the first time in the kernel, the ftraced daemon
will call kstop_machine to update those into nops.
The problem currently is that there's no way to stop the daemon from doing
this, and it can cause unneeded latencies (800us which for some is bothersome).
This patch adds a new file /debugfs/tracing/ftraced_enabled. If the daemon
is active, reading this will return "enabled\n" and "disabled\n" when the
daemon is not running. To disable the daemon, the user can echo "0" or
"disable" into this file, and "1" or "enable" to re-enable the daemon.
Since the daemon is used to convert the functions into nops to increase
the performance of the system, I also added that anytime something is
written into the ftraced_enabled file, kstop_machine will run if there
are new functions that have been detected that need to be converted.
This way the user can disable the daemon but still be able to control the
conversion of the mcount calls to nops by simply,
"echo 0 > /debugfs/tracing/ftraced_enabled"
when they need to do more conversions.
To see the number of converted functions:
"cat /debugfs/tracing/dyn_ftrace_total_info"
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-05-28 00:48:37 +00:00
|
|
|
|
2008-11-15 00:21:19 +00:00
|
|
|
/**
|
|
|
|
 * ftrace_make_nop - convert code into nop
|
|
|
|
* @mod: module structure if called by module load initialization
|
|
|
|
* @rec: the mcount call site record
|
|
|
|
* @addr: the address that the call site should be calling
|
|
|
|
*
|
|
|
|
* This is a very sensitive operation and great care needs
|
|
|
|
* to be taken by the arch. The operation should carefully
|
|
|
|
* read the location, check to see if what is read is indeed
|
|
|
|
* what we expect it to be, and then on success of the compare,
|
|
|
|
* it should write to the location.
|
|
|
|
*
|
|
|
|
* The code segment at @rec->ip should be a caller to @addr
|
|
|
|
*
|
|
|
|
* Return must be:
|
|
|
|
* 0 on success
|
|
|
|
* -EFAULT on error reading the location
|
|
|
|
* -EINVAL on a failed compare of the contents
|
|
|
|
* -EPERM on error writing to the location
|
|
|
|
* Any other value will be considered a failure.
|
|
|
|
*/
|
|
|
|
extern int ftrace_make_nop(struct module *mod,
|
|
|
|
struct dyn_ftrace *rec, unsigned long addr);
|
2008-10-31 04:03:22 +00:00
|
|
|
|
2008-10-23 13:32:59 +00:00
|
|
|
/**
|
2008-11-15 00:21:19 +00:00
|
|
|
* ftrace_make_call - convert a nop call site into a call to addr
|
|
|
|
* @rec: the mcount call site record
|
|
|
|
* @addr: the address that the call site should call
|
2008-10-23 13:32:59 +00:00
|
|
|
*
|
|
|
|
* This is a very sensitive operation and great care needs
|
|
|
|
* to be taken by the arch. The operation should carefully
|
|
|
|
* read the location, check to see if what is read is indeed
|
|
|
|
* what we expect it to be, and then on success of the compare,
|
|
|
|
* it should write to the location.
|
|
|
|
*
|
2008-11-15 00:21:19 +00:00
|
|
|
* The code segment at @rec->ip should be a nop
|
|
|
|
*
|
2008-10-23 13:32:59 +00:00
|
|
|
* Return must be:
|
|
|
|
* 0 on success
|
|
|
|
* -EFAULT on error reading the location
|
|
|
|
* -EINVAL on a failed compare of the contents
|
|
|
|
* -EPERM on error writing to the location
|
|
|
|
* Any other value will be considered a failure.
|
|
|
|
*/
|
2008-11-15 00:21:19 +00:00
|
|
|
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
|
|
|
|
|
|
|
|
|
|
|
|
/* May be defined in arch */
|
|
|
|
extern int ftrace_arch_read_dyn_info(char *buf, int size);
|
2008-10-23 13:32:59 +00:00
|
|
|
|
2008-06-21 18:17:53 +00:00
|
|
|
extern int skip_trace(unsigned long ip);
|
|
|
|
|
2008-09-06 05:06:03 +00:00
|
|
|
extern void ftrace_release(void *start, unsigned long size);
|
|
|
|
|
|
|
|
extern void ftrace_disable_daemon(void);
|
|
|
|
extern void ftrace_enable_daemon(void);
|
2008-05-12 19:20:44 +00:00
|
|
|
#else
|
2008-06-21 18:17:53 +00:00
|
|
|
# define skip_trace(ip) ({ 0; })
|
2008-05-12 19:20:49 +00:00
|
|
|
# define ftrace_force_update() ({ 0; })
|
|
|
|
# define ftrace_set_filter(buf, len, reset) do { } while (0)
|
ftrace: user update and disable dynamic ftrace daemon
In dynamic ftrace, the mcount function starts off pointing to a stub
function that just returns.
On start up, the call to the stub is modified to point to a "record_ip"
function. The job of the record_ip function is to add the function to
a pre-allocated hash list. If the function is already there, it simply is
ignored, otherwise it is added to the list.
Later, a ftraced daemon wakes up and calls kstop_machine if any functions
have been recorded, and changes the calls to the recorded functions to
a simple nop. If no functions were recorded, the daemon goes back to sleep.
The daemon wakes up once a second to see if it needs to update any newly
recorded functions into nops. Usually it does not, but if a lot of code
has been executed for the first time in the kernel, the ftraced daemon
will call kstop_machine to update those into nops.
The problem currently is that there's no way to stop the daemon from doing
this, and it can cause unneeded latencies (800us which for some is bothersome).
This patch adds a new file /debugfs/tracing/ftraced_enabled. If the daemon
is active, reading this will return "enabled\n" and "disabled\n" when the
daemon is not running. To disable the daemon, the user can echo "0" or
"disable" into this file, and "1" or "enable" to re-enable the daemon.
Since the daemon is used to convert the functions into nops to increase
the performance of the system, I also added that anytime something is
written into the ftraced_enabled file, kstop_machine will run if there
are new functions that have been detected that need to be converted.
This way the user can disable the daemon but still be able to control the
conversion of the mcount calls to nops by simply,
"echo 0 > /debugfs/tracing/ftraced_enabled"
when they need to do more conversions.
To see the number of converted functions:
"cat /debugfs/tracing/dyn_ftrace_total_info"
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-05-28 00:48:37 +00:00
|
|
|
# define ftrace_disable_daemon() do { } while (0)
|
|
|
|
# define ftrace_enable_daemon() do { } while (0)
|
2008-09-06 05:06:03 +00:00
|
|
|
static inline void ftrace_release(void *start, unsigned long size) { }
|
2008-06-21 18:17:53 +00:00
|
|
|
#endif /* CONFIG_DYNAMIC_FTRACE */
|
2008-05-12 19:20:42 +00:00
|
|
|
|
2008-05-12 19:20:49 +00:00
|
|
|
/* totally disable ftrace - can not re-enable after this */
|
|
|
|
void ftrace_kill(void);
|
|
|
|
|
2008-05-12 19:20:43 +00:00
|
|
|
/*
 * Hard-disable function tracing by clearing ftrace_enabled.  A no-op
 * when the function tracer is not built in.
 */
static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}
|
|
|
|
|
2008-08-18 08:24:56 +00:00
|
|
|
/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 *
 * Returns the previous value of ftrace_enabled so the caller can hand
 * it back to __ftrace_enabled_restore().  Always returns 0 when the
 * function tracer is not built in.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved = ftrace_enabled;

	ftrace_enabled = 0;
	return saved;
#else
	return 0;
#endif
}
|
|
|
|
|
|
|
|
/*
 * Restore ftrace_enabled to the value previously returned by
 * __ftrace_enabled_save().  No-op when the function tracer is not
 * built in.  Same locking caveat as __ftrace_enabled_save().
 */
static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
#ifdef CONFIG_FRAME_POINTER
|
|
|
|
/* TODO: need to fix this for ARM */
|
|
|
|
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
|
|
|
|
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
|
|
|
|
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
|
|
|
|
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
|
|
|
|
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
|
|
|
|
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
|
2008-05-12 19:20:51 +00:00
|
|
|
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
|
2008-05-12 19:20:42 +00:00
|
|
|
#else
|
|
|
|
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
|
|
|
|
# define CALLER_ADDR1 0UL
|
|
|
|
# define CALLER_ADDR2 0UL
|
|
|
|
# define CALLER_ADDR3 0UL
|
|
|
|
# define CALLER_ADDR4 0UL
|
|
|
|
# define CALLER_ADDR5 0UL
|
2008-05-12 19:20:51 +00:00
|
|
|
# define CALLER_ADDR6 0UL
|
2008-05-12 19:20:42 +00:00
|
|
|
#endif
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
#ifdef CONFIG_IRQSOFF_TRACER
|
2008-02-25 12:38:05 +00:00
|
|
|
extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
|
|
|
|
extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
|
2008-05-12 19:20:42 +00:00
|
|
|
#else
|
|
|
|
# define time_hardirqs_on(a0, a1) do { } while (0)
|
|
|
|
# define time_hardirqs_off(a0, a1) do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
#ifdef CONFIG_PREEMPT_TRACER
|
2008-02-25 12:38:05 +00:00
|
|
|
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
|
|
|
|
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
|
2008-05-12 19:20:42 +00:00
|
|
|
#else
|
|
|
|
# define trace_preempt_on(a0, a1) do { } while (0)
|
|
|
|
# define trace_preempt_off(a0, a1) do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2008-05-27 23:22:08 +00:00
|
|
|
#ifdef CONFIG_TRACING
|
2008-10-23 23:26:08 +00:00
|
|
|
extern int ftrace_dump_on_oops;
|
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
extern void tracing_start(void);
|
|
|
|
extern void tracing_stop(void);
|
2008-11-21 17:59:38 +00:00
|
|
|
extern void ftrace_off_permanent(void);
|
2008-11-05 21:05:44 +00:00
|
|
|
|
2008-05-12 19:21:15 +00:00
|
|
|
extern void
|
|
|
|
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
|
2008-08-01 20:45:49 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* ftrace_printk - printf formatting in the ftrace buffer
|
|
|
|
* @fmt: the printf format for printing
|
|
|
|
*
|
|
|
|
* Note: __ftrace_printk is an internal function for ftrace_printk and
|
|
|
|
* the @ip is passed in via the ftrace_printk macro.
|
|
|
|
*
|
|
|
|
* This function allows a kernel developer to debug fast path sections
|
|
|
|
* that printk is not appropriate for. By scattering in various
|
|
|
|
* printk like tracing in the code, a developer can quickly see
|
|
|
|
* where problems are occurring.
|
|
|
|
*
|
|
|
|
* This is intended as a debugging tool for the developer only.
|
|
|
|
* Please refrain from leaving ftrace_printks scattered around in
|
|
|
|
* your code.
|
|
|
|
*/
|
|
|
|
# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
|
2008-08-01 16:26:41 +00:00
|
|
|
extern int
|
|
|
|
__ftrace_printk(unsigned long ip, const char *fmt, ...)
|
|
|
|
__attribute__ ((format (printf, 2, 3)));
|
2009-01-23 14:06:23 +00:00
|
|
|
# define ftrace_vprintk(fmt, ap) __ftrace_printk(_THIS_IP_, fmt, ap)
|
|
|
|
extern int
|
|
|
|
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
|
2008-07-31 02:36:46 +00:00
|
|
|
extern void ftrace_dump(void);
|
2008-05-12 19:21:15 +00:00
|
|
|
#else
|
|
|
|
static inline void
|
|
|
|
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
|
2008-08-01 16:26:41 +00:00
|
|
|
static inline int
|
2008-12-20 09:15:14 +00:00
|
|
|
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
|
2008-08-15 15:48:02 +00:00
|
|
|
|
2008-11-05 21:05:44 +00:00
|
|
|
static inline void tracing_start(void) { }
|
|
|
|
static inline void tracing_stop(void) { }
|
2008-11-21 17:59:38 +00:00
|
|
|
static inline void ftrace_off_permanent(void) { }
|
2008-08-15 15:48:02 +00:00
|
|
|
/* Stub when CONFIG_TRACING is off: swallow the message, report 0 bytes. */
static inline int
ftrace_printk(const char *fmt, ...)
{
	return 0;
}
|
2009-01-23 14:06:23 +00:00
|
|
|
/* Stub when CONFIG_TRACING is off: swallow the message, report 0 bytes. */
static inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
|
2008-07-31 02:36:46 +00:00
|
|
|
static inline void ftrace_dump(void) { }
|
2008-05-12 19:21:15 +00:00
|
|
|
#endif
|
|
|
|
|
2008-08-14 19:45:08 +00:00
|
|
|
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
|
|
|
|
extern void ftrace_init(void);
|
2008-11-15 00:21:19 +00:00
|
|
|
extern void ftrace_init_module(struct module *mod,
|
|
|
|
unsigned long *start, unsigned long *end);
|
2008-08-14 19:45:08 +00:00
|
|
|
#else
|
|
|
|
static inline void ftrace_init(void) { }
|
2008-08-14 19:45:09 +00:00
|
|
|
static inline void
|
2008-11-15 00:21:19 +00:00
|
|
|
ftrace_init_module(struct module *mod,
|
|
|
|
unsigned long *start, unsigned long *end) { }
|
2008-08-14 19:45:08 +00:00
|
|
|
#endif
|
|
|
|
|
2008-11-24 00:49:58 +00:00
|
|
|
/* Kind of power event recorded in a power_trace (none / C-state / P-state). */
enum {
	POWER_NONE = 0,
	POWER_CSTATE = 1,
	POWER_PSTATE = 2,
};
|
|
|
|
|
|
|
|
/*
 * Per-event state carried between trace_power_start() and
 * trace_power_end().  Empty (zero cost) when the power tracer is not
 * configured.
 */
struct power_trace {
#ifdef CONFIG_POWER_TRACER
	ktime_t stamp;	/* event start time */
	ktime_t end;	/* event end time */
	int type;	/* POWER_CSTATE or POWER_PSTATE */
	int state;	/* target C/P state number */
#endif
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_POWER_TRACER
|
|
|
|
extern void trace_power_start(struct power_trace *it, unsigned int type,
|
|
|
|
unsigned int state);
|
|
|
|
extern void trace_power_mark(struct power_trace *it, unsigned int type,
|
|
|
|
unsigned int state);
|
|
|
|
extern void trace_power_end(struct power_trace *it);
|
|
|
|
#else
|
|
|
|
/* No-op stubs used when the power tracer is not configured in. */
static inline void trace_power_start(struct power_trace *it, unsigned int type,
				unsigned int state) { }
static inline void trace_power_mark(struct power_trace *it, unsigned int type,
				unsigned int state) { }
static inline void trace_power_end(struct power_trace *it) { }
|
|
|
|
#endif
|
|
|
|
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-11-25 23:57:25 +00:00
|
|
|
/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;	/* call nesting depth on the task's return stack */
};
|
2008-08-01 16:26:41 +00:00
|
|
|
|
2008-11-11 06:03:45 +00:00
|
|
|
/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;	/* timestamp taken at function entry */
	unsigned long long rettime;	/* timestamp taken at function return */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;	/* call nesting depth, mirrors ftrace_graph_ent */
};
|
|
|
|
|
2008-11-25 20:07:04 +00:00
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
2008-12-06 02:40:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Sometimes we don't want to trace a function with the function
|
|
|
|
* graph tracer but we want them to keep traced by the usual function
|
|
|
|
* tracer if the function graph tracer is not configured.
|
|
|
|
*/
|
|
|
|
#define __notrace_funcgraph notrace
|
|
|
|
|
2008-12-09 22:54:20 +00:00
|
|
|
/*
|
|
|
|
 * We want to know which function is an entry point of a hardirq.
|
|
|
|
* That will help us to put a signal on output.
|
|
|
|
*/
|
|
|
|
#define __irq_entry __attribute__((__section__(".irqentry.text")))
|
|
|
|
|
|
|
|
/* Limits of hardirq entrypoints */
|
|
|
|
extern char __irqentry_text_start[];
|
|
|
|
extern char __irqentry_text_end[];
|
|
|
|
|
2008-11-23 05:22:56 +00:00
|
|
|
#define FTRACE_RETFUNC_DEPTH 50
|
|
|
|
#define FTRACE_RETSTACK_ALLOC_SIZE 32
|
2008-11-25 23:57:25 +00:00
|
|
|
/* Type of the callback handlers for tracing function graph*/
|
|
|
|
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
|
2008-12-03 04:50:05 +00:00
|
|
|
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
|
2008-11-25 23:57:25 +00:00
|
|
|
|
|
|
|
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
|
|
|
|
trace_func_graph_ent_t entryfunc);
|
|
|
|
|
2008-12-03 04:50:02 +00:00
|
|
|
extern void ftrace_graph_stop(void);
|
|
|
|
|
2008-11-25 23:57:25 +00:00
|
|
|
/* The current handlers in use */
|
|
|
|
extern trace_func_graph_ret_t ftrace_graph_return;
|
|
|
|
extern trace_func_graph_ent_t ftrace_graph_entry;
|
2008-11-11 06:03:45 +00:00
|
|
|
|
2008-11-25 20:07:04 +00:00
|
|
|
extern void unregister_ftrace_graph(void);
|
2008-11-23 05:22:56 +00:00
|
|
|
|
2008-11-25 20:07:04 +00:00
|
|
|
extern void ftrace_graph_init_task(struct task_struct *t);
|
|
|
|
extern void ftrace_graph_exit_task(struct task_struct *t);
|
2008-12-04 22:51:23 +00:00
|
|
|
|
|
|
|
/* Current top-of-stack index of @t's function-graph return stack. */
static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}
|
2008-12-06 02:43:41 +00:00
|
|
|
|
|
|
|
static inline void pause_graph_tracing(void)
|
|
|
|
{
|
|
|
|
atomic_inc(¤t->tracing_graph_pause);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void unpause_graph_tracing(void)
|
|
|
|
{
|
|
|
|
atomic_dec(¤t->tracing_graph_pause);
|
|
|
|
}
|
2008-11-23 08:18:56 +00:00
|
|
|
#else
|
2008-12-06 02:40:00 +00:00
|
|
|
|
|
|
|
#define __notrace_funcgraph
|
2008-12-09 22:54:20 +00:00
|
|
|
#define __irq_entry
|
2008-12-06 02:40:00 +00:00
|
|
|
|
2008-11-25 20:07:04 +00:00
|
|
|
/* No-op stubs used when the function graph tracer is not configured in. */
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
|
2008-12-04 22:51:23 +00:00
|
|
|
|
|
|
|
/* Without the graph tracer there is no return stack: report -1 (empty). */
static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}
|
2008-12-06 02:43:41 +00:00
|
|
|
|
|
|
|
/* No-op stubs: nothing to pause when the graph tracer is not built in. */
static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
|
2008-11-11 06:03:45 +00:00
|
|
|
#endif
|
|
|
|
|
2008-12-03 20:36:57 +00:00
|
|
|
#ifdef CONFIG_TRACING
|
|
|
|
#include <linux/sched.h>
|
|
|
|
|
|
|
|
/* flags for current->trace */
/* Bit numbers, used with set_bit()/clear_bit() on task_struct.trace. */
enum {
	TSK_TRACE_FL_TRACE_BIT = 0,	/* task is being function-traced */
	TSK_TRACE_FL_GRAPH_BIT = 1,	/* task is being graph-traced */
};
/* Corresponding bit masks, used for direct tests of task_struct.trace. */
enum {
	TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
};
|
|
|
|
|
|
|
|
/* Atomically set the function-trace flag bit in @tsk->trace. */
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}
|
|
|
|
|
|
|
|
/* Atomically clear the function-trace flag bit in @tsk->trace. */
static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}
|
|
|
|
|
|
|
|
/* Non-zero if the function-trace flag bit is set in @tsk->trace. */
static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}
|
|
|
|
|
|
|
|
/* Atomically set the graph-trace flag bit in @tsk->trace. */
static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}
|
|
|
|
|
|
|
|
/* Atomically clear the graph-trace flag bit in @tsk->trace. */
static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}
|
|
|
|
|
|
|
|
/* Non-zero if the graph-trace flag bit is set in @tsk->trace. */
static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_TRACING */
|
|
|
|
|
2009-01-19 09:31:01 +00:00
|
|
|
|
|
|
|
#ifdef CONFIG_HW_BRANCH_TRACER
|
|
|
|
|
|
|
|
void trace_hw_branch(u64 from, u64 to);
|
|
|
|
void trace_hw_branch_oops(void);
|
|
|
|
|
|
|
|
#else /* CONFIG_HW_BRANCH_TRACER */
|
|
|
|
|
|
|
|
/* No-op stubs used when the HW branch tracer is not configured in. */
static inline void trace_hw_branch(u64 from, u64 to) {}
static inline void trace_hw_branch_oops(void) {}
|
|
|
|
|
|
|
|
#endif /* CONFIG_HW_BRANCH_TRACER */
|
|
|
|
|
2008-05-12 19:20:42 +00:00
|
|
|
#endif /* _LINUX_FTRACE_H */
|