perf_counter: abstract wakeup flag setting in core to fix powerpc build
Impact: build fix for powerpc

Commit bd753921015e7905 ("perf_counter: software counter event infrastructure") introduced a use of TIF_PERF_COUNTERS into the core perf counter code. This breaks the build on powerpc, because powerpc signals wakeups with a flag in a per-cpu area rather than a thread_info flag: thread_info flags have to be manipulated with atomic operations and are thus slower than per-cpu flags.

This fixes the build by changing the core to use an abstracted set_perf_counter_pending() function, which is defined on x86 to set the TIF_PERF_COUNTERS flag and on powerpc to set the per-cpu flag (paca->perf_counter_pending). It changes the previous powerpc definition of set_perf_counter_pending to take no argument, and adds a clear_perf_counter_pending(), so as to simplify the definition on x86.

On x86, set_perf_counter_pending() is defined as a macro. Defining it as a static inline in arch/x86/include/asm/perf_counters.h causes compile failures, because <asm/perf_counters.h> gets included early in <linux/sched.h>, so the definitions of set_tsk_thread_flag etc. are not yet available there. (On powerpc this problem is avoided by defining set_perf_counter_pending etc. in <asm/hw_irq.h>.)

Signed-off-by: Paul Mackerras <paulus@samba.org>
commit b6c5a71da1
parent 7bb497bd88
5 changed files with 19 additions and 14 deletions
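The resulting interface, in rough outline (an illustrative sketch rather than the literal patch text; get_paca() stands in for the r13-based per-cpu access that the powerpc code in the diff below does in inline assembly):

/* powerpc: the flag lives in the per-cpu paca, so a plain store
 * suffices; no atomic read-modify-write on thread_info flags. */
static inline void set_perf_counter_pending(void)
{
	get_paca()->perf_counter_pending = 1;
}

static inline void clear_perf_counter_pending(void)
{
	get_paca()->perf_counter_pending = 0;
}

/* x86: a macro rather than a static inline, because
 * <asm/perf_counters.h> is included early in <linux/sched.h>,
 * before set_tsk_thread_flag() has been defined. */
#define set_perf_counter_pending() \
	set_tsk_thread_flag(current, TIF_PERF_COUNTERS)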
@@ -142,10 +142,17 @@ static inline unsigned long get_perf_counter_pending(void)
 	return x;
 }
 
-static inline void set_perf_counter_pending(int x)
+static inline void set_perf_counter_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
-		"r" (x),
+		"r" (1),
+		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+static inline void clear_perf_counter_pending(void)
+{
+	asm volatile("stb %0,%1(13)" : :
+		"r" (0),
 		"i" (offsetof(struct paca_struct, perf_counter_pending)));
 }
 
@@ -158,7 +165,8 @@ static inline unsigned long get_perf_counter_pending(void)
 	return 0;
 }
 
-static inline void set_perf_counter_pending(int x) {}
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
 static inline void perf_counter_do_pending(void) {}
 #endif /* CONFIG_PERF_COUNTERS */
 
@@ -104,13 +104,6 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-#ifdef CONFIG_PERF_COUNTERS
-notrace void __weak perf_counter_do_pending(void)
-{
-	set_perf_counter_pending(0);
-}
-#endif
-
 notrace void raw_local_irq_restore(unsigned long en)
 {
 	/*
@@ -142,8 +135,10 @@ notrace void raw_local_irq_restore(unsigned long en)
 		iseries_handle_interrupts();
 	}
 
-	if (get_perf_counter_pending())
+	if (get_perf_counter_pending()) {
+		clear_perf_counter_pending();
 		perf_counter_do_pending();
+	}
 
 	/*
 	 * if (get_paca()->hard_enabled) return;
@@ -653,7 +653,6 @@ void perf_counter_do_pending(void)
 	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
 	struct perf_counter *counter;
 
-	set_perf_counter_pending(0);
 	for (i = 0; i < cpuhw->n_counters; ++i) {
 		counter = cpuhw->counter[i];
 		if (counter && counter->wakeup_pending) {
@@ -811,7 +810,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
 			perf_counter_do_pending();
 			irq_exit();
 		} else {
-			set_perf_counter_pending(1);
+			set_perf_counter_pending();
 		}
 	}
 }
@@ -84,6 +84,9 @@ union cpuid10_edx {
 #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
 #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
 
+#define set_perf_counter_pending() \
+	set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
+
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(int nmi);
@@ -1433,7 +1433,7 @@ static void perf_swcounter_interrupt(struct perf_counter *counter,
 
 	if (nmi) {
 		counter->wakeup_pending = 1;
-		set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
+		set_perf_counter_pending();
 	} else
 		wake_up(&counter->waitq);
 }