[PATCH] optimize writer path in time_interpolator_get_counter()
From: Christoph Lameter <clameter@engr.sgi.com>

When using a time interpolator that is susceptible to jitter, there is
potential contention over a cmpxchg used to prevent time from going
backwards.  This is unnecessary when the caller holds the xtime write
seqlock, since all readers are blocked from returning until the write is
complete.  We can therefore allow writers to insert a new value and exit
rather than fight with CPUs that only hold a reader lock.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 9f4a6dce10
commit 486d46aefe

1 changed file with 13 additions and 4 deletions
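Before the diff itself, a minimal userspace sketch of the pattern being optimized may help. This is not the kernel code: read_counter(), hw_counter(), the C11 atomics and CLOCK_MONOTONIC are stand-ins for time_interpolator_get_counter(), the hardware time source and the kernel's cmpxchg, and the seqlock itself is elided. It only illustrates the contrast described in the commit message: readers racing on the shared last-returned value must publish it with a compare-and-swap and retry on failure, while a caller that already holds the exclusive write side can store the value directly and skip that contention.

/* Userspace sketch of the "never go backwards" counter, with the
 * writer shortcut this patch adds.  Illustrative only. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t last_cycle;

/* Stand-in for the hardware time source; a monotonic clock keeps the
 * sketch self-contained. */
static uint64_t hw_counter(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static uint64_t read_counter(int writelock)
{
        uint64_t lcycle, now;

        do {
                lcycle = atomic_load(&last_cycle);
                now = hw_counter();

                /* Never hand out a value older than one already returned. */
                if (lcycle && (int64_t)(now - lcycle) < 0)
                        return lcycle;

                if (writelock) {
                        /* Writer path: with the exclusive lock held, no
                         * reader can complete concurrently, so a plain
                         * store is enough and the CAS is skipped. */
                        atomic_store(&last_cycle, now);
                        return now;
                }
                /* Reader path: publish the new value only if nobody else
                 * got there first, otherwise loop and try again. */
        } while (!atomic_compare_exchange_weak(&last_cycle, &lcycle, now));

        return now;
}

int main(void)
{
        printf("reader path: %llu\n", (unsigned long long)read_counter(0));
        printf("writer path: %llu\n", (unsigned long long)read_counter(1));
        return 0;
}

Built with any C11 compiler (e.g. cc -std=c11 sketch.c; older glibc may need -lrt for clock_gettime), both calls print monotonically increasing values; only the writer call bypasses the compare-and-swap.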
@@ -1429,7 +1429,7 @@ static inline u64 time_interpolator_get_cycles(unsigned int src)
         }
 }
 
-static inline u64 time_interpolator_get_counter(void)
+static inline u64 time_interpolator_get_counter(int writelock)
 {
         unsigned int src = time_interpolator->source;
 
@@ -1443,6 +1443,15 @@ static inline u64 time_interpolator_get_counter(void)
                 now = time_interpolator_get_cycles(src);
                 if (lcycle && time_after(lcycle, now))
                         return lcycle;
+
+                /* When holding the xtime write lock, there's no need
+                 * to add the overhead of the cmpxchg.  Readers are
+                 * force to retry until the write lock is released.
+                 */
+                if (writelock) {
+                        time_interpolator->last_cycle = now;
+                        return now;
+                }
                 /* Keep track of the last timer value returned. The use of cmpxchg here
                  * will cause contention in an SMP environment.
                  */
@@ -1456,7 +1465,7 @@ static inline u64 time_interpolator_get_counter(void)
 void time_interpolator_reset(void)
 {
         time_interpolator->offset = 0;
-        time_interpolator->last_counter = time_interpolator_get_counter();
+        time_interpolator->last_counter = time_interpolator_get_counter(1);
 }
 
 #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
@@ -1468,7 +1477,7 @@ unsigned long time_interpolator_get_offset(void)
                 return 0;
 
         return time_interpolator->offset +
-        GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator);
+        GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
 }
 
 #define INTERPOLATOR_ADJUST 65536
@@ -1491,7 +1500,7 @@ static void time_interpolator_update(long delta_nsec)
          * and the tuning logic insures that.
          */
 
-        counter = time_interpolator_get_counter();
+        counter = time_interpolator_get_counter(1);
         offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);
 
         if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
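Taken together, the call sites above establish the convention: time_interpolator_reset() and time_interpolator_update() run on the timekeeping write path (the xtime write seqlock is held there, per the commit message), so they pass 1 and take the new plain-store branch; time_interpolator_get_offset() is a read-side query, so it passes 0 and keeps the cmpxchg-protected path.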