sched: Fully integrate cpus_active_map and root-domain code
Reflect "active" cpus in the rq->rd->online field, instead of the online_map. The motivation is that things that use the root-domain code (such as cpupri) only care about cpus classified as "active" anyway. By synchronizing the root-domain state with the active map, we allow several optimizations. For instance, we can remove an extra cpumask_and from the scheduler hotpath by utilizing rq->rd->online (since it is now a cached version of cpu_active_map & rq->rd->span). Signed-off-by: Gregory Haskins <ghaskins@novell.com> Acked-by: Peter Zijlstra <peterz@infradead.org> Acked-by: Max Krasnyansky <maxk@qualcomm.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20090730145723.25226.24493.stgit@dev.haskins.net> Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3f029d3c6d
commit 00aec93d10
3 changed files with 8 additions and 11 deletions
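The whole patch hinges on one invariant: the hotplug/attach slow path keeps rq->rd->online equal to cpu_active_map & rq->rd->span, so hot-path code can read the cached mask instead of recomputing the intersection. A minimal userspace sketch of that caching pattern, with plain unsigned long bitmasks standing in for struct cpumask (rd_refresh_online, usable_before and usable_after are illustrative names, not kernel API):

#include <stdio.h>

/* Toy stand-ins: one unsigned long per mask instead of struct cpumask. */
static unsigned long cpu_active_map;	/* globally schedulable cpus */

struct root_domain {
	unsigned long span;	/* cpus belonging to this root domain */
	unsigned long online;	/* cached value of cpu_active_map & span */
};

/* Slow path (hotplug / rq_attach_root time): refresh the cache. */
static void rd_refresh_online(struct root_domain *rd)
{
	rd->online = cpu_active_map & rd->span;
}

/* Hot path before the patch: an AND on every invocation. */
static unsigned long usable_before(const struct root_domain *rd)
{
	return cpu_active_map & rd->span;
}

/* Hot path after the patch: a plain read of the cached mask. */
static unsigned long usable_after(const struct root_domain *rd)
{
	return rd->online;
}

int main(void)
{
	struct root_domain rd = { .span = 0x0fUL };	/* cpus 0-3 */

	cpu_active_map = 0x0bUL;	/* cpus 0, 1 and 3 are active */
	rd_refresh_online(&rd);		/* done once, off the hot path */

	printf("before: %#lx  after: %#lx\n",
	       usable_before(&rd), usable_after(&rd));	/* both 0xb */
	return 0;
}

The trade-off is the classic one: the AND moves from every hot-path call to the rare hotplug/domain-attach events that can change either mask.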
kernel/sched.c
@@ -7927,7 +7927,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	rq->rd = rd;
 
 	cpumask_set_cpu(rq->cpu, rd->span);
-	if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
+	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
kernel/sched_fair.c
@@ -1046,17 +1046,21 @@ static void yield_task_fair(struct rq *rq)
  * search starts with cpus closest then further out as needed,
  * so we always favor a closer, idle cpu.
  * Domains may include CPUs that are not usable for migration,
- * hence we need to mask them out (cpu_active_mask)
+ * hence we need to mask them out (rq->rd->online)
  *
  * Returns the CPU we should wake onto.
  */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
+
+#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
+
 static int wake_idle(int cpu, struct task_struct *p)
 {
 	struct sched_domain *sd;
 	int i;
 	unsigned int chosen_wakeup_cpu;
 	int this_cpu;
+	struct rq *task_rq = task_rq(p);
 
 	/*
 	 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
@@ -1089,10 +1093,10 @@ static int wake_idle(int cpu, struct task_struct *p)
 	for_each_domain(cpu, sd) {
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
-			&& !task_hot(p, task_rq(p)->clock, sd))) {
+			&& !task_hot(p, task_rq->clock, sd))) {
 			for_each_cpu_and(i, sched_domain_span(sd),
 					 &p->cpus_allowed) {
-				if (cpu_active(i) && idle_cpu(i)) {
+				if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 							      se.nr_wakeups_idle);
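What the new test encodes, compared to the old one: cpu_active(i) consulted the global active map, while cpu_rd_active(i, task_rq) consults the root domain's cached online mask, i.e. the active cpus restricted to rq->rd->span. In practice the candidate cpus in this loop already lie inside the span, so the visible win is replacing the global-mask lookup with one cached-mask test. A toy comparison under those assumptions (bitmask stand-ins; old_cpu_active and new_cpu_rd_active are illustrative names):

#include <stdio.h>

static unsigned long cpu_active_map = 0x2bUL;	/* cpus 0,1,3,5 active */

struct root_domain {
	unsigned long span;
	unsigned long online;	/* cpu_active_map & span */
};

/* Old check: only asks whether the cpu is globally active. */
static int old_cpu_active(int cpu)
{
	return !!(cpu_active_map & (1UL << cpu));
}

/* New check: the root domain's cached mask, so the cpu must also
 * sit inside this task's root-domain span. */
static int new_cpu_rd_active(int cpu, const struct root_domain *rd)
{
	return !!(rd->online & (1UL << cpu));
}

int main(void)
{
	struct root_domain rd = { .span = 0x0fUL };	/* cpus 0-3 only */

	rd.online = cpu_active_map & rd.span;

	/* cpu 5 is active but outside the span: old says 1, new says 0 */
	printf("cpu5 old=%d new=%d\n",
	       old_cpu_active(5), new_cpu_rd_active(5, &rd));
	/* cpu 1 is active and in the span: both say 1 */
	printf("cpu1 old=%d new=%d\n",
	       old_cpu_active(1), new_cpu_rd_active(1, &rd));
	return 0;
}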
kernel/sched_rt.c
@@ -1172,13 +1172,6 @@ static int find_lowest_rq(struct task_struct *task)
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 		return -1; /* No targets found */
 
-	/*
-	 * Only consider CPUs that are usable for migration.
-	 * I guess we might want to change cpupri_find() to ignore those
-	 * in the first place.
-	 */
-	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
 	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system. Now we want to elect
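Why the cpumask_and() can be dropped outright rather than moved: cpupri only tracks runqueues that have been set online, and after this patch a runqueue goes online only when its cpu is in cpu_active_map, so any mask cpupri_find() returns is already a subset of the active map, making the AND a no-op. A small assertion sketch of that subset invariant (toy bitmasks, not kernel code; the concrete mask values are made up for illustration):

#include <assert.h>

/* Subset test: every bit set in sub is also set in super. */
static int is_subset(unsigned long sub, unsigned long super)
{
	return (sub & ~super) == 0UL;
}

int main(void)
{
	unsigned long cpu_active_map = 0x0bUL;		/* cpus 0,1,3 */
	unsigned long rd_span        = 0x0fUL;		/* cpus 0-3   */
	unsigned long rd_online      = cpu_active_map & rd_span;
	unsigned long lowest_mask    = 0x09UL;		/* as if from cpupri */

	/* cpupri only sees runqueues set online, and a runqueue is now
	 * set online only when its cpu is active, so anything cpupri
	 * hands back is already a subset of the active map: */
	assert(is_subset(lowest_mask, rd_online));
	assert(is_subset(lowest_mask, cpu_active_map));

	/* hence the removed cpumask_and() could never clear a bit: */
	assert((lowest_mask & cpu_active_map) == lowest_mask);
	return 0;
}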