perf_counter: Do not throttle single swcounter events
We can have swcounter events that contribute more than a single count per
event; when used with a non-zero period, those can generate multiple overflow
events, which is when we need throttling.

However, swcounters that contribute only a single count per event can only
come as fast as we can run code, hence don't throttle them.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
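The case the changelog describes is a software counter opened with a non-zero sample period. Below is a minimal user-space sketch of that configuration; it deliberately uses today's API names (perf_event_open, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS), whereas at the time of this patch the syscall still carried the perf_counter naming, so treat it as illustrative context rather than as part of the commit. A page-fault counter adds exactly one count per event, which is the case this patch exempts from throttling.

/*
 * Illustrative sketch only: open a software counter with a non-zero
 * sample period, the configuration the changelog is about.  Modern
 * API names are used here; they are not part of this patch.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

/* Thin local wrapper; glibc does not provide one for this syscall. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size          = sizeof(attr);
        attr.type          = PERF_TYPE_SOFTWARE;
        attr.config        = PERF_COUNT_SW_PAGE_FAULTS; /* one count per event */
        attr.sample_period = 1000;                      /* non-zero period: sampling mode */
        attr.sample_type   = PERF_SAMPLE_IP;

        fd = perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        /* ... mmap the ring buffer and consume samples here ... */

        close(fd);
        return 0;
}

Such a counter can only overflow as fast as the task can execute faulting code, which is why it is no longer offered to the throttling logic; only software counters whose events add several counts at once, and can therefore cross several sample periods per event, remain throttle candidates.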
parent 0ec04e16d0
commit 850bc73ffc

1 changed file with 13 additions and 4 deletions
kernel/perf_counter.c

@@ -3494,14 +3494,15 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  * Generic counter overflow handling, sampling.
  */
 
-int perf_counter_overflow(struct perf_counter *counter, int nmi,
-                          struct perf_sample_data *data)
+static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
+                                   int throttle, struct perf_sample_data *data)
 {
         int events = atomic_read(&counter->event_limit);
-        int throttle = counter->pmu->unthrottle != NULL;
         struct hw_perf_counter *hwc = &counter->hw;
         int ret = 0;
 
+        throttle = (throttle && counter->pmu->unthrottle != NULL);
+
         if (!throttle) {
                 hwc->interrupts++;
         } else {
@@ -3554,6 +3555,12 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
         return ret;
 }
 
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+                          struct perf_sample_data *data)
+{
+        return __perf_counter_overflow(counter, nmi, 1, data);
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -3592,6 +3599,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
                                     int nmi, struct perf_sample_data *data)
 {
         struct hw_perf_counter *hwc = &counter->hw;
+        int throttle = 0;
         u64 overflow;
 
         data->period = counter->hw.last_period;
@@ -3601,13 +3609,14 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
                 return;
 
         for (; overflow; overflow--) {
-                if (perf_counter_overflow(counter, nmi, data)) {
+                if (__perf_counter_overflow(counter, nmi, throttle, data)) {
                         /*
                          * We inhibit the overflow from happening when
                          * hwc->interrupts == MAX_INTERRUPTS.
                          */
                         break;
                 }
+                throttle = 1;
         }
 }
 
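To make the new control flow concrete, here is a small self-contained sketch (plain user-space C, not kernel code) of the policy the last two hunks implement: the first overflow produced by one event is delivered with throttling disabled, and only additional overflows from the same event, which can only occur when that event contributed more than one count, are offered to the throttling logic. All identifiers below (swcounter_add, emit_overflow, sample_period, period_left) are local stand-ins for the kernel's counterparts.

/*
 * Self-contained illustration of the throttling policy above.
 * This is not kernel code; all identifiers are local stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

static int64_t sample_period = 4;   /* counts per sample, analogous to hwc->sample_period */
static int64_t period_left;         /* counts remaining in the current period */

/* Stand-in for __perf_counter_overflow(); a non-zero return would stop delivery. */
static int emit_overflow(int throttle)
{
        printf("overflow delivered, throttle=%d\n", throttle);
        return 0; /* pretend delivery was never actually inhibited */
}

/* One software event contributing 'nr' counts, like perf_swcounter_add(). */
static void swcounter_add(int64_t nr)
{
        int64_t overflow = 0;
        int throttle = 0;   /* the first overflow of this event is never throttled */

        period_left -= nr;
        while (period_left <= 0) {  /* how many periods did this event cross? */
                period_left += sample_period;
                overflow++;
        }

        for (; overflow; overflow--) {
                if (emit_overflow(throttle))
                        break;
                throttle = 1;   /* further overflows from the same event may throttle */
        }
}

int main(void)
{
        period_left = sample_period;

        swcounter_add(1);   /* single count: no period crossed, nothing emitted  */
        swcounter_add(4);   /* one period crossed: one unthrottled overflow      */
        swcounter_add(10);  /* several periods crossed: later overflows throttle */
        return 0;
}

In the kernel the per-overflow decision is further gated by the pmu capability check added in the first hunk, throttle = (throttle && counter->pmu->unthrottle != NULL), so throttling is only attempted when the caller allows it and the pmu can actually be unthrottled again later.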