sched: Create a helper function to calculate imbalance
Move all the imbalance calculation out of find_busiest_group() through this
helper function.

With this change, the structure of find_busiest_group() will be as follows:

- update_sched_domain_statistics.
- check if imbalance exists.
- update imbalance and return busiest.

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
LKML-Reference: <20090325091411.13992.43293.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2e6f44aeda
commit dbc523a3b8

1 changed file with 45 additions and 33 deletions
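A rough, self-contained sketch of the control flow the commit message describes (this is not the kernel code: the types are cut down, the __cpu_power and SCHED_LOAD_SCALE scaling is left out, and the statistics are filled with made-up numbers purely so the example runs):

#include <stdio.h>

/* Cut-down stand-in for the kernel's sd_lb_stats. */
struct sd_lb_stats {
        unsigned long max_load;               /* load of the busiest group    */
        unsigned long avg_load;               /* average load over the domain */
        unsigned long this_load;              /* load of the local group      */
        unsigned long busiest_load_per_task;  /* per-task load in busiest     */
        int busiest;                          /* stand-in for the group ptr   */
};

/* Step 1: update_sched_domain_statistics - gather per-group load figures.
 * Hypothetical constants stand in for the real per-CPU accounting. */
static void update_sched_domain_statistics(struct sd_lb_stats *sds)
{
        sds->max_load = 2048;
        sds->avg_load = 1536;
        sds->this_load = 1024;
        sds->busiest_load_per_task = 512;
        sds->busiest = 1;
}

/* Step 3: the helper this commit introduces - decide how much load to move
 * (cpu_power scaling omitted in this sketch). */
static void calculate_imbalance(struct sd_lb_stats *sds, unsigned long *imbalance)
{
        unsigned long pull_a = sds->max_load - sds->avg_load;
        unsigned long pull_b = sds->max_load - sds->busiest_load_per_task;
        unsigned long max_pull = pull_a < pull_b ? pull_a : pull_b;
        unsigned long room = sds->avg_load - sds->this_load;

        *imbalance = max_pull < room ? max_pull : room;
}

static int find_busiest_group(unsigned long *imbalance)
{
        struct sd_lb_stats sds = { 0 };

        update_sched_domain_statistics(&sds);           /* step 1 */
        if (sds.max_load <= sds.busiest_load_per_task)  /* step 2: no imbalance? */
                return -1;                              /* "out_balanced"        */
        calculate_imbalance(&sds, imbalance);           /* step 3 */
        return sds.busiest;
}

int main(void)
{
        unsigned long imbalance = 0;
        int busiest = find_busiest_group(&imbalance);

        printf("busiest=%d imbalance=%lu\n", busiest, imbalance);
        return 0;
}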
@@ -3549,6 +3549,47 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
         if (pwr_move > pwr_now)
                 *imbalance = sds->busiest_load_per_task;
 }
+
+/**
+ * calculate_imbalance - Calculate the amount of imbalance present within the
+ *                       groups of a given sched_domain during load balance.
+ * @sds: statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: Cpu for which currently load balance is being performed.
+ * @imbalance: The variable to store the imbalance.
+ */
+static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
+                unsigned long *imbalance)
+{
+        unsigned long max_pull;
+        /*
+         * In the presence of smp nice balancing, certain scenarios can have
+         * max load less than avg load(as we skip the groups at or below
+         * its cpu_power, while calculating max_load..)
+         */
+        if (sds->max_load < sds->avg_load) {
+                *imbalance = 0;
+                return fix_small_imbalance(sds, this_cpu, imbalance);
+        }
+
+        /* Don't want to pull so many tasks that a group would go idle */
+        max_pull = min(sds->max_load - sds->avg_load,
+                        sds->max_load - sds->busiest_load_per_task);
+
+        /* How much load to actually move to equalise the imbalance */
+        *imbalance = min(max_pull * sds->busiest->__cpu_power,
+                (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+                        / SCHED_LOAD_SCALE;
+
+        /*
+         * if *imbalance is less than the average load per runnable task
+         * there is no gaurantee that any tasks will be moved so we'll have
+         * a think about bumping its value to force at least one task to be
+         * moved
+         */
+        if (*imbalance < sds->busiest_load_per_task)
+                return fix_small_imbalance(sds, this_cpu, imbalance);
+
+}
 /******* find_busiest_group() helpers end here *********************/
 
 /*
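For intuition, a worked example of the arithmetic in the new calculate_imbalance() above, using hypothetical load figures (SCHED_LOAD_SCALE is 1024 on these kernels, and both groups' __cpu_power is taken to be 1024, i.e. single-CPU groups):

        max_load = 2048, avg_load = 1536, this_load = 1024, busiest_load_per_task = 512

        max_pull   = min(2048 - 1536, 2048 - 512)                 = 512
        *imbalance = min(512 * 1024, (1536 - 1024) * 1024) / 1024 = 512

The result is not less than busiest_load_per_task, so fix_small_imbalance() is not called and the caller is asked to move 512 units of weighted load, i.e. exactly one task at this per-task load. The early return at the top of the function covers the opposite corner: with smp nice balancing, max_load can fall below avg_load, in which case the imbalance is zeroed and fix_small_imbalance() decides whether moving a single task is still worthwhile.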
@@ -3562,7 +3603,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
                    int *sd_idle, const struct cpumask *cpus, int *balance)
 {
         struct sd_lb_stats sds;
-        unsigned long max_pull;
 
         memset(&sds, 0, sizeof(sds));
 
@@ -3605,36 +3645,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         if (sds.max_load <= sds.busiest_load_per_task)
                 goto out_balanced;
 
-        /*
-         * In the presence of smp nice balancing, certain scenarios can have
-         * max load less than avg load(as we skip the groups at or below
-         * its cpu_power, while calculating max_load..)
-         */
-        if (sds.max_load < sds.avg_load) {
-                *imbalance = 0;
-                fix_small_imbalance(&sds, this_cpu, imbalance);
-                goto ret_busiest;
-        }
-
-        /* Don't want to pull so many tasks that a group would go idle */
-        max_pull = min(sds.max_load - sds.avg_load,
-                        sds.max_load - sds.busiest_load_per_task);
-
-        /* How much load to actually move to equalise the imbalance */
-        *imbalance = min(max_pull * sds.busiest->__cpu_power,
-                (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
-                        / SCHED_LOAD_SCALE;
-
-        /*
-         * if *imbalance is less than the average load per runnable task
-         * there is no gaurantee that any tasks will be moved so we'll have
-         * a think about bumping its value to force at least one task to be
-         * moved
-         */
-        if (*imbalance < sds.busiest_load_per_task)
-                fix_small_imbalance(&sds, this_cpu, imbalance);
-
-ret_busiest:
+        /* Looks like there is an imbalance. Compute it */
+        calculate_imbalance(&sds, this_cpu, imbalance);
         return sds.busiest;
 
 out_balanced: