mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
sched: Fix/add missing update_rq_clock() calls
kthread_bind(), migrate_task() and sched_fork were missing updates, and try_to_wake_up() was updating after having already used the stale clock. Aside from preventing potential latency hits, there's a side benefit in that early boot printk time stamps become monotonic. Signed-off-by: Mike Galbraith <efault@gmx.de> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <1258020464.6491.2.camel@marge.simson.net> Signed-off-by: Ingo Molnar <mingo@elte.hu> LKML-Reference: <new-submission>
This commit is contained in:
parent
aa021baa32
commit
055a00865d
1 changed file with 13 additions and 6 deletions
|
@ -2017,6 +2017,7 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock_irqsave(&rq->lock, flags);
|
spin_lock_irqsave(&rq->lock, flags);
|
||||||
|
update_rq_clock(rq);
|
||||||
set_task_cpu(p, cpu);
|
set_task_cpu(p, cpu);
|
||||||
p->cpus_allowed = cpumask_of_cpu(cpu);
|
p->cpus_allowed = cpumask_of_cpu(cpu);
|
||||||
p->rt.nr_cpus_allowed = 1;
|
p->rt.nr_cpus_allowed = 1;
|
||||||
|
@ -2115,6 +2116,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
|
||||||
* it is sufficient to simply update the task's cpu field.
|
* it is sufficient to simply update the task's cpu field.
|
||||||
*/
|
*/
|
||||||
if (!p->se.on_rq && !task_running(rq, p)) {
|
if (!p->se.on_rq && !task_running(rq, p)) {
|
||||||
|
update_rq_clock(rq);
|
||||||
set_task_cpu(p, dest_cpu);
|
set_task_cpu(p, dest_cpu);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -2376,13 +2378,14 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
|
||||||
task_rq_unlock(rq, &flags);
|
task_rq_unlock(rq, &flags);
|
||||||
|
|
||||||
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
|
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
|
||||||
if (cpu != orig_cpu)
|
if (cpu != orig_cpu) {
|
||||||
set_task_cpu(p, cpu);
|
local_irq_save(flags);
|
||||||
|
rq = cpu_rq(cpu);
|
||||||
rq = task_rq_lock(p, &flags);
|
|
||||||
|
|
||||||
if (rq != orig_rq)
|
|
||||||
update_rq_clock(rq);
|
update_rq_clock(rq);
|
||||||
|
set_task_cpu(p, cpu);
|
||||||
|
local_irq_restore(flags);
|
||||||
|
}
|
||||||
|
rq = task_rq_lock(p, &flags);
|
||||||
|
|
||||||
WARN_ON(p->state != TASK_WAKING);
|
WARN_ON(p->state != TASK_WAKING);
|
||||||
cpu = task_cpu(p);
|
cpu = task_cpu(p);
|
||||||
|
@ -2545,6 +2548,7 @@ static void __sched_fork(struct task_struct *p)
|
||||||
void sched_fork(struct task_struct *p, int clone_flags)
|
void sched_fork(struct task_struct *p, int clone_flags)
|
||||||
{
|
{
|
||||||
int cpu = get_cpu();
|
int cpu = get_cpu();
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
__sched_fork(p);
|
__sched_fork(p);
|
||||||
|
|
||||||
|
@ -2581,7 +2585,10 @@ void sched_fork(struct task_struct *p, int clone_flags)
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
|
cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
|
||||||
#endif
|
#endif
|
||||||
|
local_irq_save(flags);
|
||||||
|
update_rq_clock(cpu_rq(cpu));
|
||||||
set_task_cpu(p, cpu);
|
set_task_cpu(p, cpu);
|
||||||
|
local_irq_restore(flags);
|
||||||
|
|
||||||
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
|
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
|
||||||
if (likely(sched_info_on()))
|
if (likely(sched_info_on()))
|
||||||
|
|
Loading…
Reference in a new issue