perf_counter: Put whole group on when enabling group leader
Currently, if you have a group where the leader is disabled and there are siblings that are enabled, and then you enable the leader, we only put the leader on the PMU, and not its enabled siblings. This is incorrect, since the enabled group members should be all on or all off at any given point.

This fixes it by adding a call to group_sched_in in __perf_counter_enable in the case where we're enabling a group leader.

To avoid the need for a forward declaration this also moves group_sched_in up before __perf_counter_enable. The actual content of group_sched_in is unchanged by this patch.

[ Impact: fix bug in counter enable code ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18951.34946.451546.691693@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 8823392360
commit 6751b71ea2
1 changed file with 51 additions and 48 deletions
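For context, here is a minimal user-space sketch of the situation the commit message describes: a disabled group leader with an enabled sibling, where enabling the leader is expected to bring the whole group onto the PMU. It is written against the later perf_event_open() interface rather than the perf_counter syscalls used in this tree, and the choice of events, the busy loop, and the error handling are illustrative assumptions, not part of this commit.

/*
 * Hedged sketch: disabled group leader + enabled sibling, then enable
 * the leader and expect the sibling to count too. Uses the modern
 * perf_event_open() API as a stand-in for the old perf_counter one.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	/* glibc has no wrapper for perf_event_open(); go through syscall(2). */
	return syscall(SYS_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int leader, sibling;

	/* Group leader: created disabled, so the whole group stays off. */
	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;
	leader = perf_open(&attr, -1);

	/* Sibling: individually enabled, but idle until the leader runs. */
	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;
	sibling = perf_open(&attr, leader);

	if (leader < 0 || sibling < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * Enabling the leader must schedule in the enabled sibling as
	 * well -- the behaviour this patch fixes for perf_counter groups.
	 */
	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	read(sibling, &count, sizeof(count));
	printf("sibling counted %lld instructions\n", count);
	return 0;
}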
@@ -419,6 +419,54 @@ counter_sched_in(struct perf_counter *counter,
 	return 0;
 }
 
+static int
+group_sched_in(struct perf_counter *group_counter,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx,
+	       int cpu)
+{
+	struct perf_counter *counter, *partial_group;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
+
+	group_counter->prev_state = group_counter->state;
+	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+		return -EAGAIN;
+
+	/*
+	 * Schedule in siblings as one group (if any):
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		counter->prev_state = counter->state;
+		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+			partial_group = counter;
+			goto group_error;
+		}
+	}
+
+	return 0;
+
+group_error:
+	/*
+	 * Groups can be scheduled in as one unit only, so undo any
+	 * partial group before returning:
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		if (counter == partial_group)
+			break;
+		counter_sched_out(counter, cpuctx, ctx);
+	}
+	counter_sched_out(group_counter, cpuctx, ctx);
+
+	return -EAGAIN;
+}
+
 /*
  * Return 1 for a group consisting entirely of software counters,
  * 0 if the group contains any hardware counters.
@@ -643,6 +691,9 @@ static void __perf_counter_enable(void *info)
 
 	if (!group_can_go_on(counter, cpuctx, 1))
 		err = -EEXIST;
+	else if (counter == leader)
+		err = group_sched_in(counter, cpuctx, ctx,
+				     smp_processor_id());
 	else
 		err = counter_sched_in(counter, cpuctx, ctx,
 				       smp_processor_id());
@@ -791,54 +842,6 @@ static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
 }
 
-static int
-group_sched_in(struct perf_counter *group_counter,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx,
-	       int cpu)
-{
-	struct perf_counter *counter, *partial_group;
-	int ret;
-
-	if (group_counter->state == PERF_COUNTER_STATE_OFF)
-		return 0;
-
-	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
-	if (ret)
-		return ret < 0 ? ret : 0;
-
-	group_counter->prev_state = group_counter->state;
-	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
-		return -EAGAIN;
-
-	/*
-	 * Schedule in siblings as one group (if any):
-	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		counter->prev_state = counter->state;
-		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
-			partial_group = counter;
-			goto group_error;
-		}
-	}
-
-	return 0;
-
-group_error:
-	/*
-	 * Groups can be scheduled in as one unit only, so undo any
-	 * partial group before returning:
-	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		if (counter == partial_group)
-			break;
-		counter_sched_out(counter, cpuctx, ctx);
-	}
-	counter_sched_out(group_counter, cpuctx, ctx);
-
-	return -EAGAIN;
-}
-
 static void
 __perf_counter_sched_in(struct perf_counter_context *ctx,
 			struct perf_cpu_context *cpuctx, int cpu)