sched: fix newidle smp group balancing
Re-compute the shares on newidle - so we can make a decision based on recent data.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent c8cba857b4
commit 3e5459b4be

1 changed file with 13 additions and 0 deletions
kernel/sched.c
@@ -1579,6 +1579,13 @@ static void update_shares(struct sched_domain *sd)
 	walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
 }
 
+static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+	spin_unlock(&rq->lock);
+	update_shares(sd);
+	spin_lock(&rq->lock);
+}
+
 static void update_h_load(int cpu)
 {
 	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
@@ -1595,6 +1602,10 @@ static inline void update_shares(struct sched_domain *sd)
 {
 }
 
+static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
+{
+}
+
 #endif
 
 #endif
@@ -3543,6 +3554,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 
 	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
+	update_shares_locked(this_rq, sd);
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, cpus, NULL);
 	if (!group) {
@@ -3586,6 +3598,7 @@ redo:
 	} else
 		sd->nr_balance_failed = 0;
 
+	update_shares_locked(this_rq, sd);
 	return ld_moved;
 
 out_balanced:
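The new update_shares_locked() helper wraps update_shares() in an unlock/relock pair: load_balance_newidle() already holds this_rq->lock, while recomputing the shares walks the task-group tree and, judging by the unlock/relock in the helper, is not something to do with the runqueue lock held. Below is a minimal userspace sketch of the same idiom; a pthread mutex stands in for the runqueue spinlock, and the struct rq and shares fields are illustrative placeholders rather than the real sched.c definitions.

/*
 * Sketch of the unlock/recompute/relock idiom used by update_shares_locked()
 * above.  Pthreads stand in for the kernel spinlock; struct rq and "shares"
 * are illustrative only.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int shares;		/* placeholder for per-group share data */
};

/* Placeholder for the real recomputation, which walks the task-group tree
 * and is done without holding rq->lock. */
static void update_shares(struct rq *rq)
{
	rq->shares++;
}

/* Callers hold rq->lock; drop it around the recomputation and retake it
 * before returning, as the patch's update_shares_locked() does. */
static void update_shares_locked(struct rq *rq)
{
	pthread_mutex_unlock(&rq->lock);
	update_shares(rq);
	pthread_mutex_lock(&rq->lock);
}

int main(void)
{
	struct rq rq = { .lock = PTHREAD_MUTEX_INITIALIZER, .shares = 0 };

	/* As in load_balance_newidle(): the lock is already held when the
	 * balancing decision is about to be made. */
	pthread_mutex_lock(&rq.lock);
	update_shares_locked(&rq);	/* refresh shares so the decision sees recent data */
	printf("shares after newidle refresh: %d\n", rq.shares);
	pthread_mutex_unlock(&rq.lock);
	return 0;
}

In the patch itself the refresh happens twice: once right before find_busiest_group(), so the newidle balancing decision is based on recent share data, and once more after tasks have been moved.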