sched: Clean up check_preempt_wakeup()
Streamline the wakeup preemption code a bit, unifying the preempt path so that they all do the same.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent a65ac745e4
commit 3a7e73a2e2

1 changed file with 35 additions and 42 deletions
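The diff below is easiest to read as an instance of a common kernel idiom: instead of each preemption condition duplicating its own resched_task(curr); return; sequence, every condition jumps to a single preempt: label that performs the reschedule once. A minimal standalone sketch of that idiom follows; the predicate and action names here are illustrative stand-ins, not the real scheduler internals:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the real scheduler predicates and action. */
static bool rt_wakeup(void)          { return false; }
static bool curr_is_sched_idle(void) { return true;  }
static bool entity_preempts(void)    { return false; }

static void resched(void)            { puts("resched current task"); }

static void check_preempt_sketch(void)
{
	if (rt_wakeup())
		goto preempt;	/* was: resched(); return; */

	if (curr_is_sched_idle())
		goto preempt;	/* was: resched(); return; */

	if (entity_preempts())
		goto preempt;

	return;			/* no preemption needed */

preempt:
	resched();		/* the single, unified preempt path */
}

int main(void)
{
	check_preempt_sketch();
	return 0;
}

Any follow-up work that belongs on every preemption (in the real patch, the buddy handling after resched_task()) then only has to be written once, under the label.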
@@ -1651,10 +1651,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	int sync = wake_flags & WF_SYNC;
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio))) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(rt_prio(p->prio)))
+		goto preempt;
 
 	if (unlikely(p->sched_class != &fair_sched_class))
 		return;
@@ -1680,37 +1678,32 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 		return;
 
 	/* Idle tasks are by definition preempted by everybody. */
-	if (unlikely(curr->policy == SCHED_IDLE)) {
-		resched_task(curr);
-		return;
-	}
+	if (unlikely(curr->policy == SCHED_IDLE))
+		goto preempt;
 
-	if ((sched_feat(WAKEUP_SYNC) && sync) ||
-	    (sched_feat(WAKEUP_OVERLAP) &&
-	     (se->avg_overlap < sysctl_sched_migration_cost &&
-	      pse->avg_overlap < sysctl_sched_migration_cost))) {
-		resched_task(curr);
-		return;
-	}
+	if (sched_feat(WAKEUP_SYNC) && sync)
+		goto preempt;
 
-	if (sched_feat(WAKEUP_RUNNING)) {
-		if (pse->avg_running < se->avg_running) {
-			set_next_buddy(pse);
-			resched_task(curr);
-			return;
-		}
-	}
+	if (sched_feat(WAKEUP_OVERLAP) &&
+			se->avg_overlap < sysctl_sched_migration_cost &&
+			pse->avg_overlap < sysctl_sched_migration_cost)
+		goto preempt;
+
+	if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running)
+		goto preempt;
 
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	find_matching_se(&se, &pse);
-
-	BUG_ON(!pse);
-
 	update_curr(cfs_rq);
+	find_matching_se(&se, &pse);
+	BUG_ON(!pse);
+	if (wakeup_preempt_entity(se, pse) == 1)
+		goto preempt;
 
-	if (wakeup_preempt_entity(se, pse) == 1) {
-		resched_task(curr);
-		/*
-		 * Only set the backward buddy when the current task is still
+	return;
+
+preempt:
+	resched_task(curr);
+	/*
+	 * Only set the backward buddy when the current task is still
@@ -1723,9 +1716,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	 */
 	if (unlikely(!se->on_rq || curr == rq->idle))
 		return;
+
 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
 		set_last_buddy(se);
-	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
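Reassembled from the hunks above, the tail of the rewritten function shows the shape of the result: every condition funnels into the one preempt: label (the unchanged middle of the comment block, which falls between the hunks and is not shown in the diff, is elided here):

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	update_curr(cfs_rq);
	find_matching_se(&se, &pse);
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1)
		goto preempt;

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * ... (comment continues unchanged) ...
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}

Note the semantic side of the cleanup: the buddy bookkeeping that was previously reachable only from the wakeup_preempt_entity() branch now runs on every preemption path, which is exactly the "so that they all do the same" of the commit message.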