mirror of
https://github.com/adulau/aha.git
synced 2024-12-27 11:16:11 +00:00
sched: Update the cpu_power sum during load-balance
In order to prepare for a more dynamic cpu_power, update the group sum while walking the sched domains during load-balance. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com> Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com> Acked-by: Gautham R Shenoy <ego@in.ibm.com> Cc: Balbir Singh <balbir@in.ibm.com> LKML-Reference: <20090901083825.985050292@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
b5d978e0c7
commit
cc9fba7d76
1 changed file with 29 additions and 4 deletions
|
@ -3699,6 +3699,28 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
|
|||
}
|
||||
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
|
||||
|
||||
static void update_sched_power(struct sched_domain *sd)
|
||||
{
|
||||
struct sched_domain *child = sd->child;
|
||||
struct sched_group *group, *sdg = sd->groups;
|
||||
unsigned long power = sdg->__cpu_power;
|
||||
|
||||
if (!child) {
|
||||
/* compute cpu power for this cpu */
|
||||
return;
|
||||
}
|
||||
|
||||
sdg->__cpu_power = 0;
|
||||
|
||||
group = child->groups;
|
||||
do {
|
||||
sdg->__cpu_power += group->__cpu_power;
|
||||
group = group->next;
|
||||
} while (group != child->groups);
|
||||
|
||||
if (power != sdg->__cpu_power)
|
||||
sdg->reciprocal_cpu_power = reciprocal_value(sdg->__cpu_power);
|
||||
}
|
||||
|
||||
/**
|
||||
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
|
||||
|
@ -3712,7 +3734,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
|
|||
* @balance: Should we balance.
|
||||
* @sgs: variable to hold the statistics for this group.
|
||||
*/
|
||||
static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
|
||||
static inline void update_sg_lb_stats(struct sched_domain *sd,
|
||||
struct sched_group *group, int this_cpu,
|
||||
enum cpu_idle_type idle, int load_idx, int *sd_idle,
|
||||
int local_group, const struct cpumask *cpus,
|
||||
int *balance, struct sg_lb_stats *sgs)
|
||||
|
@ -3723,8 +3746,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
|
|||
unsigned long sum_avg_load_per_task;
|
||||
unsigned long avg_load_per_task;
|
||||
|
||||
if (local_group)
|
||||
if (local_group) {
|
||||
balance_cpu = group_first_cpu(group);
|
||||
if (balance_cpu == this_cpu)
|
||||
update_sched_power(sd);
|
||||
}
|
||||
|
||||
/* Tally up the load of all CPUs in the group */
|
||||
sum_avg_load_per_task = avg_load_per_task = 0;
|
||||
|
@ -3828,7 +3854,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
|
|||
local_group = cpumask_test_cpu(this_cpu,
|
||||
sched_group_cpus(group));
|
||||
memset(&sgs, 0, sizeof(sgs));
|
||||
update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
|
||||
update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
|
||||
local_group, cpus, balance, &sgs);
|
||||
|
||||
if (local_group && balance && !(*balance))
|
||||
|
@ -3863,7 +3889,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
|
|||
update_sd_power_savings_stats(group, sds, local_group, &sgs);
|
||||
group = group->next;
|
||||
} while (group != sd->groups);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
Loading…
Reference in a new issue