mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 19:56:18 +00:00
sched: sync wakeups vs avg_overlap
While looking at the code I wondered why we always do: `sync && avg_overlap < migration_cost`. Which is a bit odd, since the overlap test was meant to detect sync wakeups, so using it to specialize sync wakeups doesn't make much sense. Hence change the code to do: `sync || avg_overlap < migration_cost`. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
990d0f2ced
commit
2fb7635c4c
1 changed file with 10 additions and 8 deletions
|
@ -1103,6 +1103,11 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
|
||||||
if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
|
if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
if (!sync && sched_feat(SYNC_WAKEUPS) &&
|
||||||
|
curr->se.avg_overlap < sysctl_sched_migration_cost &&
|
||||||
|
p->se.avg_overlap < sysctl_sched_migration_cost)
|
||||||
|
sync = 1;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If sync wakeup then subtract the (maximum possible)
|
* If sync wakeup then subtract the (maximum possible)
|
||||||
* effect of the currently running task from the load
|
* effect of the currently running task from the load
|
||||||
|
@ -1127,11 +1132,8 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
|
||||||
* a reasonable amount of time then attract this newly
|
* a reasonable amount of time then attract this newly
|
||||||
* woken task:
|
* woken task:
|
||||||
*/
|
*/
|
||||||
if (sync && balanced) {
|
if (sync && balanced)
|
||||||
if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
|
return 1;
|
||||||
p->se.avg_overlap < sysctl_sched_migration_cost)
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
schedstat_inc(p, se.nr_wakeups_affine_attempts);
|
schedstat_inc(p, se.nr_wakeups_affine_attempts);
|
||||||
tl_per_task = cpu_avg_load_per_task(this_cpu);
|
tl_per_task = cpu_avg_load_per_task(this_cpu);
|
||||||
|
@ -1268,9 +1270,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
|
||||||
if (!sched_feat(WAKEUP_PREEMPT))
|
if (!sched_feat(WAKEUP_PREEMPT))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (sched_feat(WAKEUP_OVERLAP) && sync &&
|
if (sched_feat(WAKEUP_OVERLAP) && (sync ||
|
||||||
se->avg_overlap < sysctl_sched_migration_cost &&
|
(se->avg_overlap < sysctl_sched_migration_cost &&
|
||||||
pse->avg_overlap < sysctl_sched_migration_cost) {
|
pse->avg_overlap < sysctl_sched_migration_cost))) {
|
||||||
resched_task(curr);
|
resched_task(curr);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue