Mirror of https://github.com/adulau/aha.git, synced 2024-12-28 03:36:19 +00:00
cpumask: convert kernel trace functions further
Impact: Reduce future memory usage, use new cpumask API.

Since the last patch was created and acked, more old cpumask users slipped into kernel/trace. Mostly trivial conversions, except struct trace_iterator's "started" member becomes a cpumask_var_t.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 9e01c1b74c
commit 4462344ee9

6 changed files with 16 additions and 10 deletions
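As background for the hunks below (a hedged sketch, not part of the commit): under the new API a cpumask embedded in a structure becomes a cpumask_var_t, which must be set up with alloc_cpumask_var() and torn down with free_cpumask_var(), and the old value-style helpers (cpu_isset, cpu_set, cpus_setall, cpus_weight_nr, for_each_cpu_mask) give way to pointer-based ones (cpumask_test_cpu, cpumask_set_cpu, cpumask_setall, cpumask_weight, for_each_cpu). The struct example_iter container here is hypothetical, standing in for struct trace_iterator:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical container, standing in for struct trace_iterator. */
struct example_iter {
	cpumask_var_t	started;	/* was: cpumask_t started; */
};

static int example_iter_init(struct example_iter *iter)
{
	/* With CONFIG_CPUMASK_OFFSTACK the mask lives off the stack and must be allocated. */
	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(iter->started);			/* was: cpus_setall(iter->started); */

	if (!cpumask_test_cpu(0, iter->started))	/* was: cpu_isset(0, iter->started) */
		cpumask_set_cpu(0, iter->started);	/* was: cpu_set(0, iter->started) */

	return 0;
}

static void example_iter_free(struct example_iter *iter)
{
	free_cpumask_var(iter->started);	/* required counterpart of alloc_cpumask_var() */
}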
@@ -1811,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
 	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 		return;
 
-	if (cpu_isset(iter->cpu, iter->started))
+	if (cpumask_test_cpu(iter->cpu, iter->started))
 		return;
 
-	cpu_set(iter->cpu, iter->started);
+	cpumask_set_cpu(iter->cpu, iter->started);
 	trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
 }
 
@@ -3114,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (!iter)
 		return -ENOMEM;
 
+	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+		kfree(iter);
+		return -ENOMEM;
+	}
+
 	mutex_lock(&trace_types_lock);
 
 	/* trace pipe does not show start of buffer */
-	cpus_setall(iter->started);
+	cpumask_setall(iter->started);
 
 	iter->tr = &global_trace;
 	iter->trace = current_trace;
@@ -3134,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
 	struct trace_iterator *iter = file->private_data;
 
+	free_cpumask_var(iter->started);
 	kfree(iter);
 	atomic_dec(&tracing_reader);
 
@@ -368,7 +368,7 @@ struct trace_iterator {
 	loff_t pos;
 	long idx;
 
-	cpumask_t started;
+	cpumask_var_t started;
 };
 
 int tracing_is_enabled(void);
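For readers unfamiliar with the type switch above: cpumask_t is a fixed-size struct embedded by value, while cpumask_var_t is, roughly, defined as in the sketch below (a from-memory approximation of include/linux/cpumask.h from this era, not part of the diff). With CONFIG_CPUMASK_OFFSTACK it is a pointer that must be allocated; otherwise it degenerates to a one-element array, so the same accessor calls compile either way.

/* Approximate definition, for illustration only. */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* allocated via alloc_cpumask_var() */
#else
typedef struct cpumask cpumask_var_t[1];	/* still embedded; alloc/free are effectively no-ops */
#endif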
@@ -42,7 +42,7 @@ static int boot_trace_init(struct trace_array *tr)
 	int cpu;
 	boot_trace = tr;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		tracing_reset(tr, cpu);
 
 	tracing_sched_switch_assign_trace(tr);
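The boot, hw-branch, and power tracer hunks are all the same mechanical change: for_each_cpu_mask() iterated over a cpumask_t value (here the global cpu_possible_map), whereas for_each_cpu() takes a const struct cpumask pointer (cpu_possible_mask). A minimal hedged sketch of the new-style loop in kernel/trace context; the reset_all_possible_cpus() helper name is made up:

#include <linux/cpumask.h>

#include "trace.h"	/* for struct trace_array and tracing_reset() */

/* Hypothetical helper: reset the trace buffer of every possible CPU. */
static void reset_all_possible_cpus(struct trace_array *tr)
{
	int cpu;

	/* cpu_possible_mask is a const struct cpumask *, matching for_each_cpu(). */
	for_each_cpu(cpu, cpu_possible_mask)
		tracing_reset(tr, cpu);
}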
@@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
 	int i;
 	int ret;
 	int log10_this = log10_cpu(cpu);
-	int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
+	int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
 
 
 	/*
@@ -46,7 +46,7 @@ static void bts_trace_start(struct trace_array *tr)
 
 	tracing_reset_online_cpus(tr);
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
 }
 
@@ -62,7 +62,7 @@ static void bts_trace_stop(struct trace_array *tr)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
 }
 
@@ -172,7 +172,7 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, cpu_possible_map)
+	for_each_cpu(cpu, cpu_possible_mask)
 		smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
 }
 
|
@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr)
|
|||
|
||||
trace_power_enabled = 1;
|
||||
|
||||
for_each_cpu_mask(cpu, cpu_possible_map)
|
||||
for_each_cpu(cpu, cpu_possible_mask)
|
||||
tracing_reset(tr, cpu);
|
||||
return 0;
|
||||
}
|
||||
|
|