sched_clock: Make it NMI safe
Arjan complained about the suckyness of TSC on modern machines, and
asked if we could do something about that for PERF_SAMPLE_TIME.

Make cpu_clock() NMI safe by removing the spinlock and using
cmpxchg. This also makes it smaller and more robust.

Affects architectures that use HAVE_UNSTABLE_SCHED_CLOCK, i.e. IA64
and x86.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
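The core of the patch is replacing a lock-protected read-modify-write of the
per-CPU clock with an optimistic cmpxchg loop: read the old value, compute the
new one, publish it atomically, and retry if anything raced in between. Since
no lock is ever held, being interrupted at any point - even by an NMI running
the same code - cannot deadlock. A minimal userspace C11 sketch of that
pattern (stdatomic.h standing in for the kernel's cmpxchg(); the names here
are invented for illustration, this is not the kernel code itself):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t clock_val;	/* stand-in for scd->clock */

static uint64_t clock_update(uint64_t delta)
{
	uint64_t old_clock, new_clock;

	do {
		/* Read the current value without taking any lock. */
		old_clock = atomic_load(&clock_val);
		/* The kernel patch clamps/filters here; we just advance. */
		new_clock = old_clock + delta;
		/* Publish atomically; on a lost race, loop and retry. */
	} while (!atomic_compare_exchange_weak(&clock_val,
					       &old_clock, new_clock));

	return new_clock;
}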
commit def0a9b257
parent cf450a7355
2 changed files with 56 additions and 75 deletions
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2955,10 +2955,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 	}
 
 	if (sample_type & PERF_SAMPLE_TIME) {
-		/*
-		 * Maybe do better on x86 and provide cpu_clock_nmi()
-		 */
-		data->time = sched_clock();
+		data->time = perf_clock();
 
 		header->size += sizeof(data->time);
 	}
@@ -3488,7 +3485,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
-		.time = sched_clock(),
+		.time = perf_clock(),
 		.id = primary_counter_id(counter),
 		.stream_id = counter->id,
 	};
@@ -3540,7 +3537,7 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
 	}
 
 	if (counter->attr.freq) {
-		u64 now = sched_clock();
+		u64 now = perf_clock();
 		s64 delta = now - hwc->freq_stamp;
 
 		hwc->freq_stamp = now;
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -48,13 +48,6 @@ static __read_mostly int sched_clock_running;
 __read_mostly int sched_clock_stable;
 
 struct sched_clock_data {
-	/*
-	 * Raw spinlock - this is a special case: this might be called
-	 * from within instrumentation code so we dont want to do any
-	 * instrumentation ourselves.
-	 */
-	raw_spinlock_t lock;
-
 	u64 tick_raw;
 	u64 tick_gtod;
 	u64 clock;
@@ -80,7 +73,6 @@ void sched_clock_init(void)
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
-		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -109,14 +101,19 @@ static inline u64 wrap_max(u64 x, u64 y)
  * - filter out backward motion
  * - use the GTOD tick value to create a window to filter crazy TSC values
  */
-static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
+static u64 sched_clock_local(struct sched_clock_data *scd)
 {
-	s64 delta = now - scd->tick_raw;
-	u64 clock, min_clock, max_clock;
+	u64 now, clock, old_clock, min_clock, max_clock;
+	s64 delta;
 
+again:
+	now = sched_clock();
+	delta = now - scd->tick_raw;
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	old_clock = scd->clock;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
@@ -124,58 +121,27 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	 */
 
 	clock = scd->tick_gtod + delta;
-	min_clock = wrap_max(scd->tick_gtod, scd->clock);
-	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+	min_clock = wrap_max(scd->tick_gtod, old_clock);
+	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
 
-	scd->clock = clock;
+	if (cmpxchg(&scd->clock, old_clock, clock) != old_clock)
+		goto again;
 
-	return scd->clock;
+	return clock;
 }
 
-static void lock_double_clock(struct sched_clock_data *data1,
-			      struct sched_clock_data *data2)
+static u64 sched_clock_remote(struct sched_clock_data *scd)
 {
-	if (data1 < data2) {
-		__raw_spin_lock(&data1->lock);
-		__raw_spin_lock(&data2->lock);
-	} else {
-		__raw_spin_lock(&data2->lock);
-		__raw_spin_lock(&data1->lock);
-	}
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-	u64 now, clock, this_clock, remote_clock;
-	struct sched_clock_data *scd;
-
-	if (sched_clock_stable)
-		return sched_clock();
-
-	scd = cpu_sdc(cpu);
-
-	/*
-	 * Normally this is not called in NMI context - but if it is,
-	 * trying to do any locking here is totally lethal.
-	 */
-	if (unlikely(in_nmi()))
-		return scd->clock;
-
-	if (unlikely(!sched_clock_running))
-		return 0ull;
-
-	WARN_ON_ONCE(!irqs_disabled());
-	now = sched_clock();
-
-	if (cpu != raw_smp_processor_id()) {
-		struct sched_clock_data *my_scd = this_scd();
+	struct sched_clock_data *my_scd = this_scd();
+	u64 this_clock, remote_clock;
+	u64 *ptr, old_val, val;
 
-		lock_double_clock(scd, my_scd);
-
-		this_clock = __update_sched_clock(my_scd, now);
+	sched_clock_local(my_scd);
+again:
+	this_clock = my_scd->clock;
 	remote_clock = scd->clock;
 
 	/*
@@ -185,23 +151,43 @@ u64 sched_clock_cpu(int cpu)
 	 * runqueues. (this creates monotonic movement)
 	 */
 	if (likely((s64)(remote_clock - this_clock) < 0)) {
-		clock = this_clock;
-		scd->clock = clock;
+		ptr = &scd->clock;
+		old_val = remote_clock;
+		val = this_clock;
 	} else {
 		/*
 		 * Should be rare, but possible:
 		 */
-		clock = remote_clock;
-		my_scd->clock = remote_clock;
+		ptr = &my_scd->clock;
+		old_val = this_clock;
+		val = remote_clock;
 	}
 
-	__raw_spin_unlock(&my_scd->lock);
-	} else {
-		__raw_spin_lock(&scd->lock);
-		clock = __update_sched_clock(scd, now);
-	}
+	if (cmpxchg(ptr, old_val, val) != old_val)
+		goto again;
+
+	return val;
+}
 
-	__raw_spin_unlock(&scd->lock);
+u64 sched_clock_cpu(int cpu)
+{
+	struct sched_clock_data *scd;
+	u64 clock;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (sched_clock_stable)
+		return sched_clock();
+
+	if (unlikely(!sched_clock_running))
+		return 0ull;
+
+	scd = cpu_sdc(cpu);
+
+	if (cpu != smp_processor_id())
+		clock = sched_clock_remote(scd);
+	else
+		clock = sched_clock_local(scd);
 
 	return clock;
 }
@@ -223,11 +209,9 @@ void sched_clock_tick(void)
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();
 
-	__raw_spin_lock(&scd->lock);
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	__update_sched_clock(scd, now);
-	__raw_spin_unlock(&scd->lock);
+	sched_clock_local(scd);
 }
 
 /*
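For cross-CPU reads, the new sched_clock_remote() above replaces the old
lock_double_clock() ordering dance with the same optimistic scheme: compare
the local and remote clocks and publish the larger value with a cmpxchg,
retrying on a race, so time observed across CPUs never appears to move
backwards. A standalone C11 sketch of that reconciliation step (again with
invented names and stdatomic.h standing in for the kernel's cmpxchg(); a
sketch of the idea, not the kernel implementation):

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-ins for this_scd()->clock and the remote scd->clock. */
static _Atomic uint64_t local_clock;
static _Atomic uint64_t remote_clock_val;

static uint64_t clock_reconcile(void)
{
	_Atomic uint64_t *ptr;
	uint64_t this_clock, remote_clock, old_val, val;

again:
	this_clock = atomic_load(&local_clock);
	remote_clock = atomic_load(&remote_clock_val);

	if ((int64_t)(remote_clock - this_clock) < 0) {
		/* The remote clock is behind: bump it up to ours. */
		ptr = &remote_clock_val;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/* Rare, but possible: we are behind the remote clock. */
		ptr = &local_clock;
		old_val = this_clock;
		val = remote_clock;
	}

	/* Publish the larger value; if anyone raced with us, start over. */
	if (!atomic_compare_exchange_strong(ptr, &old_val, val))
		goto again;

	return val;
}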