Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/urgent

Ingo Molnar 2009-09-19 19:21:15 +02:00
commit 2df2881804
3 changed files with 36 additions and 87 deletions

kernel/trace/ftrace.c

@@ -2414,11 +2414,9 @@ unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-	unsigned long *array = m->private;
-
 	if (*pos >= ftrace_graph_count)
 		return NULL;
-	return &array[*pos];
+	return &ftrace_graph_funcs[*pos];
 }
 
 static void *
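With __g_next() reading the single global ftrace_graph_funcs table directly, there is nothing left to thread through seq_file->private, which is what lets the open and write paths below shed their private-data plumbing. A minimal userspace sketch of the resulting ->next()-style contract over a fixed global array (all names and values here are illustrative, not the kernel's):

#include <stdio.h>

#define GRAPH_MAX_FUNCS 32

static unsigned long graph_funcs[GRAPH_MAX_FUNCS] = { 0x1000, 0x2000, 0x3000 };
static long graph_count = 3;

/*
 * Mirrors __g_next(): bounds-check the cursor and return the element,
 * or NULL to end the walk. The caller advances *pos between calls.
 */
static unsigned long *g_next(long *pos)
{
	if (*pos >= graph_count)
		return NULL;
	return &graph_funcs[*pos];
}

int main(void)
{
	unsigned long *p;
	long pos;

	for (pos = 0; (p = g_next(&pos)) != NULL; pos++)
		printf("func addr: 0x%lx\n", *p);
	return 0;
}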
@@ -2482,17 +2480,11 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
-
-	if (file->f_mode & FMODE_READ) {
-		ret = seq_open(file, &ftrace_graph_seq_ops);
-		if (!ret) {
-			struct seq_file *m = file->private_data;
-			m->private = ftrace_graph_funcs;
-		}
-	} else
-		file->private_data = ftrace_graph_funcs;
-
 	mutex_unlock(&graph_lock);
 
+	if (file->f_mode & FMODE_READ)
+		ret = seq_open(file, &ftrace_graph_seq_ops);
+
 	return ret;
 }
@@ -2560,7 +2552,6 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	unsigned long *array;
 	size_t read = 0;
 	ssize_t ret;
@@ -2574,12 +2565,6 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 		goto out;
 	}
 
-	if (file->f_mode & FMODE_READ) {
-		struct seq_file *m = file->private_data;
-		array = m->private;
-	} else
-		array = file->private_data;
-
 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
 		ret = -ENOMEM;
 		goto out;
@@ -2591,7 +2576,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 	parser.buffer[parser.idx] = 0;
 
 	/* we allow only one expression at a time */
-	ret = ftrace_set_func(array, &ftrace_graph_count,
+	ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
 			      parser.buffer);
 	if (ret)
 		goto out;

kernel/trace/trace.c

@@ -125,13 +125,13 @@ int ftrace_dump_on_oops;
 
 static int tracing_set_tracer(const char *buf);
 
-#define BOOTUP_TRACER_SIZE	100
-static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
+#define MAX_TRACER_SIZE		100
+static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
 static int __init set_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
+	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
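The rename itself is mechanical: one constant now bounds both the boot-time buffer and, after this patch, every tracer name. The strncpy() call keeps its usual caveat, though: if str is MAX_TRACER_SIZE bytes or longer, no NUL terminator is written. A small userspace illustration of the bounded copy, with an explicit terminator added as a defensive extra (the function and buffer names here are made up):

#include <stdio.h>
#include <string.h>

#define MAX_TRACER_SIZE 100

static char tracer_buf[MAX_TRACER_SIZE];

static void set_tracer_name(const char *str)
{
	strncpy(tracer_buf, str, MAX_TRACER_SIZE);
	tracer_buf[MAX_TRACER_SIZE - 1] = '\0';	/* force termination */
}

int main(void)
{
	set_tracer_name("function_graph");
	printf("tracer: %s\n", tracer_buf);
	return 0;
}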
@@ -241,13 +241,6 @@ static struct tracer *trace_types __read_mostly;
 /* current_trace points to the tracer that is currently active */
 static struct tracer	*current_trace __read_mostly;
 
-/*
- * max_tracer_type_len is used to simplify the allocating of
- * buffers to read userspace tracer names. We keep track of
- * the longest tracer name registered.
- */
-static int			max_tracer_type_len;
-
 /*
  * trace_types_lock is used to protect the trace_types list.
  * This lock is also used to keep user access serialized.
@@ -619,7 +612,6 @@ __releases(kernel_lock)
 __acquires(kernel_lock)
 {
 	struct tracer *t;
-	int len;
 	int ret = 0;
 
 	if (!type->name) {
@@ -627,6 +619,11 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
+	if (strlen(type->name) > MAX_TRACER_SIZE) {
+		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
+		return -1;
+	}
+
 	/*
 	 * When this gets called we hold the BKL which means that
 	 * preemption is disabled. Various trace selftests however
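The new length check is what makes fixed-size buffers safe everywhere else in this patch: an over-long name is rejected once at registration, so no reader has to track the longest registered name (the max_tracer_type_len bookkeeping deleted above and below). A hypothetical userspace stand-in for that validate-early pattern:

#include <stdio.h>
#include <string.h>

#define MAX_TRACER_SIZE 100

static int register_name(const char *name)
{
	if (!name) {
		fprintf(stderr, "tracer: no name given\n");
		return -1;
	}
	if (strlen(name) > MAX_TRACER_SIZE) {
		fprintf(stderr, "name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}
	/* ... add to the registry ... */
	return 0;
}

int main(void)
{
	char long_name[MAX_TRACER_SIZE + 2];

	memset(long_name, 'x', sizeof(long_name) - 1);
	long_name[sizeof(long_name) - 1] = '\0';

	printf("short: %d\n", register_name("nop"));	/* accepted: 0 */
	printf("long:  %d\n", register_name(long_name));	/* rejected: -1 */
	return 0;
}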
@@ -641,7 +638,7 @@ __acquires(kernel_lock)
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
-			pr_info("Trace %s already registered\n",
+			pr_info("Tracer %s already registered\n",
 				type->name);
 			ret = -1;
 			goto out;
@@ -692,9 +689,6 @@ __acquires(kernel_lock)
 
 	type->next = trace_types;
 	trace_types = type;
-	len = strlen(type->name);
-	if (len > max_tracer_type_len)
-		max_tracer_type_len = len;
 
  out:
 	tracing_selftest_running = false;
@@ -703,7 +697,7 @@ __acquires(kernel_lock)
 	if (ret || !default_bootup_tracer)
 		goto out_unlock;
 
-	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
+	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 		goto out_unlock;
 
 	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
@@ -725,14 +719,13 @@ __acquires(kernel_lock)
 void unregister_tracer(struct tracer *type)
 {
 	struct tracer **t;
-	int len;
 
 	mutex_lock(&trace_types_lock);
 	for (t = &trace_types; *t; t = &(*t)->next) {
 		if (*t == type)
 			goto found;
 	}
-	pr_info("Trace %s not registered\n", type->name);
+	pr_info("Tracer %s not registered\n", type->name);
 	goto out;
 
  found:
@@ -745,17 +738,7 @@ void unregister_tracer(struct tracer *type)
 		current_trace->stop(&global_trace);
 		current_trace = &nop_trace;
 	}
-
-	if (strlen(type->name) != max_tracer_type_len)
-		goto out;
-
-	max_tracer_type_len = 0;
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		len = strlen((*t)->name);
-		if (len > max_tracer_type_len)
-			max_tracer_type_len = len;
-	}
- out:
+ out:
 	mutex_unlock(&trace_types_lock);
 }
@@ -2604,7 +2587,7 @@ static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+2];
+	char buf[MAX_TRACER_SIZE+2];
 	int r;
 
 	mutex_lock(&trace_types_lock);
@@ -2754,15 +2737,15 @@ static ssize_t
 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
-	char buf[max_tracer_type_len+1];
+	char buf[MAX_TRACER_SIZE+1];
 	int i;
 	size_t ret;
 	int err;
 
 	ret = cnt;
 
-	if (cnt > max_tracer_type_len)
-		cnt = max_tracer_type_len;
+	if (cnt > MAX_TRACER_SIZE)
+		cnt = MAX_TRACER_SIZE;
 
 	if (copy_from_user(&buf, ubuf, cnt))
 		return -EFAULT;
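With a compile-time bound, the write handler clamps the user count and copies into a stack buffer of known size. A userspace analogue of that clamp, copy, and terminate sequence, with memcpy() standing in for copy_from_user() and error handling trimmed:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define MAX_TRACER_SIZE 100

static ssize_t set_trace_write(const char *ubuf, size_t cnt)
{
	char buf[MAX_TRACER_SIZE + 1];
	ssize_t ret = (ssize_t)cnt;	/* report the full request as consumed */

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	memcpy(buf, ubuf, cnt);		/* kernel: copy_from_user(), may fault */
	buf[cnt] = '\0';

	printf("selected tracer: %s\n", buf);
	return ret;
}

int main(void)
{
	return set_trace_write("function_graph", strlen("function_graph")) < 0;
}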

kernel/trace/trace_events.c

@@ -271,42 +271,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
-	for (;;) {
-		if (list == &ftrace_events)
-			return NULL;
-
-		call = list_entry(list, struct ftrace_event_call, list);
-
+	list_for_each_entry_continue(call, &ftrace_events, list) {
 		/*
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
 		if (call->regfunc)
-			break;
-
-		list = list->next;
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
+
 	for (l = 0; l <= *pos; ) {
-		call = t_next(m, NULL, &l);
+		call = t_next(m, call, &l);
 		if (!call)
 			break;
 	}
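The rewritten t_start() primes its cursor with list_entry(&ftrace_events, ...), the list head itself cast to an entry. The resulting pointer is not a valid ftrace_event_call, but list_for_each_entry_continue() only dereferences its ->list member, which is the head sentinel, so iteration begins at the first real element. A standalone demonstration of that pointer arithmetic, with the kernel's list_entry()/container_of() spelled out and illustrative struct names:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

struct event_call {
	const char *name;
	struct list_head list;
};

/*
 * The kernel's list_entry() is container_of(): subtract the member's
 * offset to recover the address of the enclosing structure.
 */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct list_head head = { &head, &head };

	/* Bogus entry wrapped around the sentinel; usable only as a cursor. */
	struct event_call *cursor = list_entry(&head, struct event_call, list);

	/*
	 * The single field the iterator reads still points at the head,
	 * so iteration resumes at head.next, the first real element.
	 */
	printf("%d\n", &cursor->list == &head);	/* prints 1 */
	return 0;
}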
@@ -316,37 +306,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
- retry:
-	if (list == &ftrace_events)
-		return NULL;
-
-	call = list_entry(list, struct ftrace_event_call, list);
-
-	if (!call->enabled) {
-		list = list->next;
-		goto retry;
+	list_for_each_entry_continue(call, &ftrace_events, list) {
+		if (call->enabled)
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
+
 	for (l = 0; l <= *pos; ) {
-		call = s_next(m, NULL, &l);
+		call = s_next(m, call, &l);
 		if (!call)
 			break;
 	}
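s_next()/s_start() get the same restructuring, with call->enabled as the filter. A self-contained userspace analogue of the full pattern, cursor-continue iteration plus the start-side replay loop (list helpers are inlined here; the kernel's live in <linux/list.h>, and this sketch flattens list_for_each_entry_continue() into an explicit loop):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event_call {
	const char *name;
	int enabled;
	struct list_head list;
};

static struct list_head events = { &events, &events };

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* s_next() analogue: resume after the cursor, skip disabled entries. */
static struct event_call *s_next(struct event_call *call, long *pos)
{
	struct list_head *p;

	(*pos)++;
	for (p = call->list.next; p != &events; p = p->next) {
		call = list_entry(p, struct event_call, list);
		if (call->enabled)
			return call;
	}
	return NULL;
}

/* s_start() analogue: prime with the head sentinel, replay up to *pos. */
static struct event_call *s_start(long *pos)
{
	struct event_call *call = list_entry(&events, struct event_call, list);
	long l;

	for (l = 0; l <= *pos; ) {
		call = s_next(call, &l);
		if (!call)
			break;
	}
	return call;
}

int main(void)
{
	struct event_call a = { "sched_switch", 1, { 0 } };
	struct event_call b = { "irq_entry",    0, { 0 } };
	struct event_call c = { "kmalloc",      1, { 0 } };
	struct event_call *call;
	long pos = 0;

	list_add_tail(&a.list, &events);
	list_add_tail(&b.list, &events);
	list_add_tail(&c.list, &events);

	/* Prints sched_switch and kmalloc; irq_entry is filtered out. */
	for (call = s_start(&pos); call; call = s_next(call, &pos))
		printf("%s\n", call->name);
	return 0;
}

The replay loop is the point of the ->start() side: rather than trusting a cached list pointer in m->private that could go stale between reads, the walk is redone from the saved seq_file position each time, under the mutex.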