tracing/hw-branch-tracing: convert bts-tracer mutex to a spinlock
Impact: fix CPU hotplug lockup

bts_hotcpu_handler() is called with irqs disabled, so using mutex_lock()
is a no-no. All the BTS codepaths here are atomic (they do not schedule),
so using a spinlock is the right solution.

Cc: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 499aa86dcb
commit 2d542cf342
1 changed file with 28 additions and 29 deletions: kernel/trace/trace_hw_branches.c
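The pattern behind the diff below, as a minimal standalone sketch (not taken from the commit; example_lock, example_state and example_event_handler are hypothetical names): a handler that may run with interrupts disabled must not take a mutex, because mutex_lock() can sleep, but it may take a spinlock, provided every critical section the spinlock protects is atomic and never schedules.

/*
 * Minimal sketch only -- not the commit's code.  The identifiers
 * example_lock, example_state and example_event_handler are made up.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* replaces a DEFINE_MUTEX() */
static int example_state;

/*
 * May be called with interrupts disabled (e.g. from a CPU-hotplug
 * notifier, as in this commit).  mutex_lock() could sleep here, which
 * is not allowed in atomic context; spin_lock() only busy-waits, so it
 * is safe as long as the critical section below never sleeps.
 */
static void example_event_handler(void)
{
        spin_lock(&example_lock);
        example_state++;                /* short, non-sleeping work only */
        spin_unlock(&example_lock);
}

In the commit itself, bts_tracer_mutex / mutex_lock() / mutex_unlock() become bts_tracer_lock / spin_lock() / spin_unlock() in exactly this way.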
@@ -3,17 +3,15 @@
  *
  * Copyright (C) 2008-2009 Intel Corporation.
  * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
- *
  */
-
-#include <linux/module.h>
-#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
-#include <linux/kallsyms.h>
-#include <linux/mutex.h>
+#include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/fs.h>
 
 #include <asm/ds.h>
 
@@ -23,16 +21,17 @@
 
 #define SIZEOF_BTS (1 << 13)
 
-/* The tracer mutex protects the below per-cpu tracer array.
-   It needs to be held to:
-   - start tracing on all cpus
-   - stop tracing on all cpus
-   - start tracing on a single hotplug cpu
-   - stop tracing on a single hotplug cpu
-   - read the trace from all cpus
-   - read the trace from a single cpu
- */
-static DEFINE_MUTEX(bts_tracer_mutex);
+/*
+ * The tracer lock protects the below per-cpu tracer array.
+ * It needs to be held to:
+ * - start tracing on all cpus
+ * - stop tracing on all cpus
+ * - start tracing on a single hotplug cpu
+ * - stop tracing on a single hotplug cpu
+ * - read the trace from all cpus
+ * - read the trace from a single cpu
+ */
+static DEFINE_SPINLOCK(bts_tracer_lock);
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
 static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 
@@ -47,7 +46,7 @@ static struct trace_array *hw_branch_trace __read_mostly;
  * Start tracing on the current cpu.
  * The argument is ignored.
  *
- * pre: bts_tracer_mutex must be locked.
+ * pre: bts_tracer_lock must be locked.
  */
 static void bts_trace_start_cpu(void *arg)
 {
@@ -66,19 +65,19 @@ static void bts_trace_start_cpu(void *arg)
 
 static void bts_trace_start(struct trace_array *tr)
 {
-        mutex_lock(&bts_tracer_mutex);
+        spin_lock(&bts_tracer_lock);
 
         on_each_cpu(bts_trace_start_cpu, NULL, 1);
         trace_hw_branches_enabled = 1;
 
-        mutex_unlock(&bts_tracer_mutex);
+        spin_unlock(&bts_tracer_lock);
 }
 
 /*
  * Stop tracing on the current cpu.
  * The argument is ignored.
  *
- * pre: bts_tracer_mutex must be locked.
+ * pre: bts_tracer_lock must be locked.
  */
 static void bts_trace_stop_cpu(void *arg)
 {
@@ -90,12 +89,12 @@ static void bts_trace_stop_cpu(void *arg)
 
 static void bts_trace_stop(struct trace_array *tr)
 {
-        mutex_lock(&bts_tracer_mutex);
+        spin_lock(&bts_tracer_lock);
 
         trace_hw_branches_enabled = 0;
         on_each_cpu(bts_trace_stop_cpu, NULL, 1);
 
-        mutex_unlock(&bts_tracer_mutex);
+        spin_unlock(&bts_tracer_lock);
 }
 
 static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
@@ -103,7 +102,7 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 {
         unsigned int cpu = (unsigned long)hcpu;
 
-        mutex_lock(&bts_tracer_mutex);
+        spin_lock(&bts_tracer_lock);
 
         if (!trace_hw_branches_enabled)
                 goto out;
@@ -119,7 +118,7 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
         }
 
 out:
-        mutex_unlock(&bts_tracer_mutex);
+        spin_unlock(&bts_tracer_lock);
         return NOTIFY_DONE;
 }
 
@@ -225,7 +224,7 @@ static void trace_bts_at(const struct bts_trace *trace, void *at)
 /*
  * Collect the trace on the current cpu and write it into the ftrace buffer.
  *
- * pre: bts_tracer_mutex must be locked
+ * pre: bts_tracer_lock must be locked
  */
 static void trace_bts_cpu(void *arg)
 {
@@ -261,11 +260,11 @@ out:
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-        mutex_lock(&bts_tracer_mutex);
+        spin_lock(&bts_tracer_lock);
 
         on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
-        mutex_unlock(&bts_tracer_mutex);
+        spin_unlock(&bts_tracer_lock);
 }
 
 static void trace_bts_close(struct trace_iterator *iter)
@@ -275,11 +274,11 @@ static void trace_bts_close(struct trace_iterator *iter)
 
 void trace_hw_branch_oops(void)
 {
-        mutex_lock(&bts_tracer_mutex);
+        spin_lock(&bts_tracer_lock);
 
         trace_bts_cpu(hw_branch_trace);
 
-        mutex_unlock(&bts_tracer_mutex);
+        spin_unlock(&bts_tracer_lock);
 }
 
 struct tracer bts_tracer __read_mostly =