commit 47788c58e6
Impact: fix build warnings and possible compat misbehavior on IA64

Building a kernel on ia64 might trigger these ugly build warnings:

  CC      arch/ia64/ia32/sys_ia32.o
In file included from arch/ia64/ia32/sys_ia32.c:55:
arch/ia64/ia32/ia32priv.h:290:1: warning: "elf_check_arch" redefined
In file included from include/linux/elf.h:7,
                 from include/linux/module.h:14,
                 from include/linux/ftrace.h:8,
                 from include/linux/syscalls.h:68,
                 from arch/ia64/ia32/sys_ia32.c:18:
arch/ia64/include/asm/elf.h:19:1: warning: this is the location of the previous definition
[...]

sys_ia32.c includes linux/syscalls.h, which in turn includes linux/ftrace.h
to import the syscall tracing prototypes. But including ftrace.h can pull in
too much for a low-level file, especially on ia64 where the ia32 private
headers conflict with higher-level headers.

Now we isolate the syscall tracing headers in their own lightweight file.

Reported-by: Tony Luck <tony.luck@intel.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jason Baron <jbaron@redhat.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jiaying Zhang <jiayingz@google.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: Martin Bligh <mbligh@google.com>
Cc: Michael Davidson <md@google.com>
LKML-Reference: <20090408184058.GB6017@nowhere>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
250 lines
5.3 KiB
C
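Below is kernel/trace/trace_syscalls.c as of this commit. The dedicated header it includes, <trace/syscall.h>, only needs to expose the syscall tracing entry points defined in this file. A minimal sketch of what such a header could look like follows; the exact contents, the CONFIG_FTRACE_SYSCALLS guard, and the fallback stubs are assumptions for illustration, not the commit's verbatim header.

/*
 * include/trace/syscall.h -- hypothetical sketch of the lightweight header,
 * declaring only the entry points defined in trace_syscalls.c below.
 */
#ifndef _TRACE_SYSCALL_H
#define _TRACE_SYSCALL_H

#include <asm/ptrace.h>

#ifdef CONFIG_FTRACE_SYSCALLS		/* config name assumed for this sketch */
extern void start_ftrace_syscalls(void);
extern void stop_ftrace_syscalls(void);
extern void ftrace_syscall_enter(struct pt_regs *regs);
extern void ftrace_syscall_exit(struct pt_regs *regs);
#else
static inline void start_ftrace_syscalls(void) { }
static inline void stop_ftrace_syscalls(void) { }
static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
#endif

#endif /* _TRACE_SYSCALL_H */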
#include <trace/syscall.h>
#include <linux/kernel.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

/* Keep a counter of the syscall tracing users */
static int refcount;

/* Prevent from races on thread flags toggling */
static DEFINE_MUTEX(syscall_trace_lock);

/* Option to display the parameters types */
enum {
        TRACE_SYSCALLS_OPT_TYPES = 0x1,
};

static struct tracer_opt syscalls_opts[] = {
        { TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
        { }
};

static struct tracer_flags syscalls_flags = {
        .val = 0, /* By default: no parameters types */
        .opts = syscalls_opts
};

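/*
 * Format a syscall-entry event for output: "name(arg: value, ...)".
 * Argument types are printed in front of each value only when the
 * syscall_arg_type option is set.
 */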
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, ret, syscall;

        trace_assign_type(trace, ent);

        syscall = trace->nr;

        entry = syscall_nr_to_meta(syscall);
        if (!entry)
                goto end;

        ret = trace_seq_printf(s, "%s(", entry->name);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        for (i = 0; i < entry->nb_args; i++) {
                /* parameter types */
                if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
                        ret = trace_seq_printf(s, "%s ", entry->types[i]);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* parameter values */
                ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
                                       trace->args[i],
                                       i == entry->nb_args - 1 ? ")" : ",");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

end:
        trace_seq_printf(s, "\n");
        return TRACE_TYPE_HANDLED;
}

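/*
 * Format a syscall-exit event for output: "name -> 0xreturn_value".
 */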
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;
        int ret;

        trace_assign_type(trace, ent);

        syscall = trace->nr;

        entry = syscall_nr_to_meta(syscall);
        if (!entry) {
                trace_seq_printf(s, "\n");
                return TRACE_TYPE_HANDLED;
        }

        ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                               trace->ret);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

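/*
 * Enable syscall tracing: flag every thread with TIF_SYSCALL_FTRACE so the
 * arch syscall path calls ftrace_syscall_enter()/ftrace_syscall_exit().
 * Reference counted, so only the first user does the work.
 */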
void start_ftrace_syscalls(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        mutex_lock(&syscall_trace_lock);

        /* Don't enable the flag on the tasks twice */
        if (++refcount != 1)
                goto unlock;

        arch_init_ftrace_syscalls();
        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, t) {
                set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
        } while_each_thread(g, t);

        read_unlock_irqrestore(&tasklist_lock, flags);

unlock:
        mutex_unlock(&syscall_trace_lock);
}

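/*
 * Disable syscall tracing: drop a reference and, once the last user is gone,
 * clear TIF_SYSCALL_FTRACE on every thread.
 */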
void stop_ftrace_syscalls(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        mutex_lock(&syscall_trace_lock);

        /* There are perhaps still some users */
        if (--refcount)
                goto unlock;

        read_lock_irqsave(&tasklist_lock, flags);

        do_each_thread(g, t) {
                clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
        } while_each_thread(g, t);

        read_unlock_irqrestore(&tasklist_lock, flags);

unlock:
        mutex_unlock(&syscall_trace_lock);
}

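/*
 * Called on syscall entry for flagged tasks: record the syscall number and
 * its arguments in the ring buffer as a TRACE_SYSCALL_ENTER event.
 */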
void ftrace_syscall_enter(struct pt_regs *regs)
{
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        int size;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
                                                  0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        trace_current_buffer_unlock_commit(event, 0, 0);
        trace_wake_up();
}

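/*
 * Called on syscall exit for flagged tasks: record the syscall number and
 * its return value in the ring buffer as a TRACE_SYSCALL_EXIT event.
 */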
void ftrace_syscall_exit(struct pt_regs *regs)
{
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        int syscall_nr;

        syscall_nr = syscall_get_nr(current, regs);

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
                                                  sizeof(*entry), 0, 0);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        trace_current_buffer_unlock_commit(event, 0, 0);
        trace_wake_up();
}

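/*
 * Tracer callbacks: selecting the "syscall" tracer starts syscall tracing;
 * deselecting it stops tracing and resets the per-cpu ring buffers.
 */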
static int init_syscall_tracer(struct trace_array *tr)
{
        start_ftrace_syscalls();

        return 0;
}

static void reset_syscall_tracer(struct trace_array *tr)
{
        stop_ftrace_syscalls();
        tracing_reset_online_cpus(tr);
}

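/*
 * Bind the TRACE_SYSCALL_ENTER/EXIT entry types to their output handlers.
 */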
static struct trace_event syscall_enter_event = {
        .type           = TRACE_SYSCALL_ENTER,
        .trace          = print_syscall_enter,
};

static struct trace_event syscall_exit_event = {
        .type           = TRACE_SYSCALL_EXIT,
        .trace          = print_syscall_exit,
};

static struct tracer syscall_tracer __read_mostly = {
        .name           = "syscall",
        .init           = init_syscall_tracer,
        .reset          = reset_syscall_tracer,
        .flags          = &syscalls_flags,
};

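/*
 * Boot-time setup: register the enter/exit output events and the "syscall"
 * tracer itself.
 */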
__init int register_ftrace_syscalls(void)
{
        int ret;

        ret = register_ftrace_event(&syscall_enter_event);
        if (!ret) {
                printk(KERN_WARNING "event %d failed to register\n",
                       syscall_enter_event.type);
                WARN_ON_ONCE(1);
        }

        ret = register_ftrace_event(&syscall_exit_event);
        if (!ret) {
                printk(KERN_WARNING "event %d failed to register\n",
                       syscall_exit_event.type);
                WARN_ON_ONCE(1);
        }

        return register_tracer(&syscall_tracer);
}
device_initcall(register_ftrace_syscalls);