perf_counter: Fix cpu migration counter
This fixes the cpu migration software counter to count correctly even when contexts get swapped from one task to another. Previously the cpu migration counts reported by perf stat were bogus, ranging from negative to several thousand for a single "lat_ctx 2 8 32" run. With this patch the cpu migration count reported for "lat_ctx 2 8 32" is almost always between 35 and 44.

This fixes the problem by adding a call into the perf_counter code from set_task_cpu when tasks are migrated. This enables us to use the generic swcounter code (with some modifications) for the cpu migration counter.

This modifies the swcounter code to allow a NULL regs pointer to be passed in to perf_swcounter_ctx_event() etc. The cpu migration counter does this because there isn't necessarily a pt_regs struct for the task available. In this case, the counter will not have interrupt capability - but the migration counter didn't have interrupt capability before, so this is no loss.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <18979.35006.819769.416327@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
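As a rough, stand-alone illustration of the NULL-regs convention the message describes (all identifiers below are simplified stand-ins chosen for this sketch, not the actual kernel symbols; the real change is in the hunks further down):

/*
 * Minimal sketch, not kernel code: models how a software-counter match
 * routine can tolerate a NULL regs pointer.  When no pt_regs is available
 * (e.g. a migration event raised from the scheduler), the user/kernel
 * exclusion checks are skipped and the event is still counted; it simply
 * cannot take an interrupt/sample.
 */
#include <stdbool.h>

struct regs;                       /* stand-in for struct pt_regs */

struct swcounter {
        bool exclude_user;         /* stand-in for hw_event.exclude_user */
        bool exclude_kernel;       /* stand-in for hw_event.exclude_kernel */
};

/* Placeholder for the real user_mode(regs) check. */
static bool user_mode(const struct regs *regs)
{
        (void)regs;
        return false;
}

static bool swcounter_match(const struct swcounter *c, const struct regs *regs)
{
        if (regs) {
                if (c->exclude_user && user_mode(regs))
                        return false;
                if (c->exclude_kernel && !user_mode(regs))
                        return false;
        }
        /* regs == NULL: no mode information, so count unconditionally. */
        return true;
}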
parent f38b082081
commit 3f731ca60a
3 changed files with 27 additions and 54 deletions
include/linux/perf_counter.h

@@ -615,6 +615,8 @@ extern void perf_counter_munmap(unsigned long addr, unsigned long len,
 
 extern void perf_counter_comm(struct task_struct *tsk);
 
+extern void perf_counter_task_migration(struct task_struct *task, int cpu);
+
 #define MAX_STACK_DEPTH 255
 
 struct perf_callchain_entry {
@@ -668,6 +670,8 @@ perf_counter_munmap(unsigned long addr, unsigned long len,
 
 static inline void perf_counter_comm(struct task_struct *tsk) { }
 static inline void perf_counter_init(void) { }
+static inline void perf_counter_task_migration(struct task_struct *task,
+                                               int cpu) { }
 #endif
 
 #endif /* __KERNEL__ */
kernel/perf_counter.c

@@ -2921,11 +2921,13 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	if (counter->hw_event.config != event_config)
 		return 0;
 
-	if (counter->hw_event.exclude_user && user_mode(regs))
-		return 0;
+	if (regs) {
+		if (counter->hw_event.exclude_user && user_mode(regs))
+			return 0;
 
-	if (counter->hw_event.exclude_kernel && !user_mode(regs))
-		return 0;
+		if (counter->hw_event.exclude_kernel && !user_mode(regs))
+			return 0;
+	}
 
 	return 1;
 }
@@ -2935,7 +2937,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 {
 	int neg = atomic64_add_negative(nr, &counter->hw.count);
 
-	if (counter->hw.irq_period && !neg)
+	if (counter->hw.irq_period && !neg && regs)
 		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
 
@@ -3151,55 +3153,24 @@ static const struct pmu perf_ops_task_clock = {
 /*
  * Software counter: cpu migrations
  */
-
-static inline u64 get_cpu_migrations(struct perf_counter *counter)
-{
-	struct task_struct *curr = counter->ctx->task;
-
-	if (curr)
-		return curr->se.nr_migrations;
-	return cpu_nr_migrations(smp_processor_id());
-}
-
-static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
-{
-	u64 prev, now;
-	s64 delta;
-
-	prev = atomic64_read(&counter->hw.prev_count);
-	now = get_cpu_migrations(counter);
-
-	atomic64_set(&counter->hw.prev_count, now);
-
-	delta = now - prev;
-
-	atomic64_add(delta, &counter->count);
-}
-
-static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
-{
-	cpu_migrations_perf_counter_update(counter);
-}
-
-static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
-{
-	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-		atomic64_set(&counter->hw.prev_count,
-			     get_cpu_migrations(counter));
-	return 0;
-}
-
-static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
-{
-	cpu_migrations_perf_counter_update(counter);
-}
-
-static const struct pmu perf_ops_cpu_migrations = {
-	.enable		= cpu_migrations_perf_counter_enable,
-	.disable	= cpu_migrations_perf_counter_disable,
-	.read		= cpu_migrations_perf_counter_read,
-};
+void perf_counter_task_migration(struct task_struct *task, int cpu)
+{
+	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_counter_context *ctx;
+
+	perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
+				 PERF_COUNT_CPU_MIGRATIONS,
+				 1, 1, NULL, 0);
+
+	ctx = perf_pin_task_context(task);
+	if (ctx) {
+		perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
+					 PERF_COUNT_CPU_MIGRATIONS,
+					 1, 1, NULL, 0);
+		perf_unpin_context(ctx);
+	}
+}
 
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
@@ -3272,11 +3243,8 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_PAGE_FAULTS_MIN:
 	case PERF_COUNT_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_CONTEXT_SWITCHES:
-		pmu = &perf_ops_generic;
-		break;
 	case PERF_COUNT_CPU_MIGRATIONS:
-		if (!counter->hw_event.exclude_kernel)
-			pmu = &perf_ops_cpu_migrations;
+		pmu = &perf_ops_generic;
 		break;
 	}
 
kernel/sched.c

@@ -1977,6 +1977,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
+		perf_counter_task_migration(p, new_cpu);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;