sched_clock: delay using sched_clock()
Some architectures can't handle sched_clock() being called too early; delay this until sched_clock_init() has been called.

Reported-by: Bill Gatliff <bgat@billgatliff.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Nishanth Aravamudan <nacc@us.ibm.com>
CC: Russell King - ARM Linux <linux@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit c1955a3d47
parent 4a273f209c
2 changed files with 20 additions and 13 deletions
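The change boils down to a "do not read the clock before it is initialised" guard: a file-scope flag that sched_clock_init() sets, and a sched_clock_cpu() that returns 0 until then. A minimal userspace sketch of that pattern, with hypothetical names that merely mirror the kernel ones (this is not the kernel code itself):

/* Hypothetical stand-alone sketch of the guard this commit introduces. */
#include <stdio.h>

static int clock_running;			/* mirrors sched_clock_running */

static unsigned long long raw_clock(void)
{
	/* Stand-in for the arch sched_clock(), unsafe to call too early. */
	return 123456789ULL;
}

static void clock_init(void)
{
	/* Mirrors sched_clock_init(): only from here on may the clock be read. */
	clock_running = 1;
}

static unsigned long long clock_read_cpu(int cpu)
{
	(void)cpu;
	if (!clock_running)			/* too early: report 0 instead */
		return 0;
	return raw_clock();
}

int main(void)
{
	printf("before init: %llu\n", clock_read_cpu(0));	/* prints 0 */
	clock_init();
	printf("after init:  %llu\n", clock_read_cpu(0));	/* prints the raw value */
	return 0;
}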
include/linux/sched.h
@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-	return sched_clock();
-}
-
 static inline void sched_clock_tick(void)
 {
 }
@@ -1573,8 +1567,6 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
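With this hunk the header declares sched_clock_init() and sched_clock_cpu() unconditionally: both configurations now get real definitions in kernel/sched_clock.c, so the empty inline stub and the direct pass-through to sched_clock() are dropped from the !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK branch.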
kernel/sched_clock.c
@@ -42,6 +42,8 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
+static __read_mostly int sched_clock_running;
+
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
@@ -70,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
 	u64 ktime_now = ktime_to_ns(ktime_get());
@@ -248,6 +248,21 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+	sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+	if (unlikely(!sched_clock_running))
+		return 0;
+
+	return sched_clock();
+}
+
 #endif
 
 unsigned long long cpu_clock(int cpu)
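For context on why the guard is needed at all: an architecture's override of the weak sched_clock() typically reads a hardware counter that is only mapped and calibrated during early timer setup, so calling it before that point can return garbage or fault. A purely hypothetical illustration (not taken from any real architecture):

/* Hypothetical arch-style clock; names and behaviour are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int fake_counter = 1000;
static unsigned int *timer_reg;			/* NULL until "timer init" runs */
static unsigned long long ns_per_tick;		/* 0 until "timer init" runs    */

static unsigned long long arch_sched_clock(void)
{
	if (!timer_reg) {
		/* In a real kernel this would be a NULL dereference or a nonsense
		 * value; the sched_clock_running guard keeps callers from getting here. */
		fprintf(stderr, "sched_clock() called before timer init!\n");
		exit(1);
	}
	return (unsigned long long)*timer_reg * ns_per_tick;
}

static void timer_init(void)
{
	timer_reg = &fake_counter;
	ns_per_tick = 20;
}

int main(void)
{
	timer_init();				/* comment out to see the early-call failure */
	printf("%llu ns\n", arch_sched_clock());
	return 0;
}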