sched: convert sched.c from for_each_cpu_mask to for_each_cpu.

Impact: trivial API conversion

This is a simple conversion, but note that for_each_cpu() terminates
with i >= nr_cpu_ids, not i == NR_CPUS like for_each_cpu_mask() did.
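
For illustration only (find_busy_cpu() and wants_work() below are made-up
names, not from sched.c), a minimal sketch of what that means for callers
that test the iterator after the loop:

	/* Sketch only: wants_work() is a made-up predicate. */
	static int find_busy_cpu(const struct cpumask *mask)
	{
		int i;

		for_each_cpu(i, mask)
			if (wants_work(i))
				break;

		/* Old for_each_cpu_mask() callers checked i == NR_CPUS here. */
		if (i >= nr_cpu_ids)
			return -1;	/* ran off the end of the mask */
		return i;
	}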

I don't convert all of them: sd->span changes in a later patch, so those
iterators are converted there rather than here.
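
As the hunks below show, the new iterator also takes a pointer to the mask
rather than the mask itself, e.g.

	for_each_cpu_mask_nr(i, group->cpumask) {

becomes

	for_each_cpu(i, &group->cpumask) {

and loops over *cpu_map now pass cpu_map directly.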

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit abcd083a1a
parent ea6f18ed5a
Author:    Rusty Russell <rusty@rustcorp.com.au>
Date:      2008-11-25 02:35:02 +10:30
Committer: Ingo Molnar <mingo@elte.hu>

diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2061,7 +2061,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask_nr(i, group->cpumask) {
+		for_each_cpu(i, &group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2103,7 +2103,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask_nr(i, *tmp) {
+	for_each_cpu(i, tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3121,7 +3121,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask_nr(i, group->cpumask) {
+		for_each_cpu(i, &group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3400,7 +3400,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask_nr(i, group->cpumask) {
+	for_each_cpu(i, &group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3942,7 +3942,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask_nr(balance_cpu, cpus) {
+		for_each_cpu(balance_cpu, &cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6906,7 +6906,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask_nr(i, *span) {
+	for_each_cpu(i, span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6917,7 +6917,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask_nr(j, *span) {
+		for_each_cpu(j, span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7117,7 +7117,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask_nr(j, sg->cpumask) {
+		for_each_cpu(j, &sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7142,7 +7142,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask_nr(cpu, *cpu_map) {
+	for_each_cpu(cpu, cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7396,7 +7396,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7463,7 +7463,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7480,7 +7480,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7547,7 +7547,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask_nr(j, *nodemask) {
+		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7593,21 +7593,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7627,7 +7627,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask_nr(i, *cpu_map) {
+	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7709,7 +7709,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	cpumask_t tmpmask;
 	int i;
 
-	for_each_cpu_mask_nr(i, *cpu_map)
+	for_each_cpu(i, cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);