sched: Separate out build of NUMA sched groups from __build_sched_domains
... to further strip down __build_sched_domains().

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <20090818110111.GL29515@alberich.amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent de616e36c7
commit 0601a88d8f

1 changed file with 67 additions and 63 deletions

 kernel/sched.c | 130 +++++++++++++++++++++++++++++++++-----------------------
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8246,6 +8246,71 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 		sg = sg->next;
 	} while (sg != group_head);
 }
+
+static int build_numa_sched_groups(struct s_data *d,
+				   const struct cpumask *cpu_map, int num)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg, *prev;
+	int n, j;
+
+	cpumask_clear(d->covered);
+	cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
+	if (cpumask_empty(d->nodemask)) {
+		d->sched_group_nodes[num] = NULL;
+		goto out;
+	}
+
+	sched_domain_node_span(num, d->domainspan);
+	cpumask_and(d->domainspan, d->domainspan, cpu_map);
+
+	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+			  GFP_KERNEL, num);
+	if (!sg) {
+		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+		       num);
+		return -ENOMEM;
+	}
+	d->sched_group_nodes[num] = sg;
+
+	for_each_cpu(j, d->nodemask) {
+		sd = &per_cpu(node_domains, j).sd;
+		sd->groups = sg;
+	}
+
+	sg->__cpu_power = 0;
+	cpumask_copy(sched_group_cpus(sg), d->nodemask);
+	sg->next = sg;
+	cpumask_or(d->covered, d->covered, d->nodemask);
+
+	prev = sg;
+	for (j = 0; j < nr_node_ids; j++) {
+		n = (num + j) % nr_node_ids;
+		cpumask_complement(d->notcovered, d->covered);
+		cpumask_and(d->tmpmask, d->notcovered, cpu_map);
+		cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
+		if (cpumask_empty(d->tmpmask))
+			break;
+		cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
+		if (cpumask_empty(d->tmpmask))
+			continue;
+		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				  GFP_KERNEL, num);
+		if (!sg) {
+			printk(KERN_WARNING
+			       "Can not alloc domain group for node %d\n", j);
+			return -ENOMEM;
+		}
+		sg->__cpu_power = 0;
+		cpumask_copy(sched_group_cpus(sg), d->tmpmask);
+		sg->next = prev->next;
+		cpumask_or(d->covered, d->covered, d->tmpmask);
+		prev->next = sg;
+		prev = sg;
+	}
+out:
+	return 0;
+}
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_NUMA
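[Editor's note] The heart of the extracted helper is the ring of sched_groups it builds: the node's own group points at itself, and each later group covering a remote part of the node's span is spliced in after "prev". Below is a minimal standalone sketch of that circular-list pattern in plain userspace C; the struct and names are hypothetical illustrations, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct group {
	int node;            /* which node this group covers */
	struct group *next;  /* circular link, like sched_group::next */
};

int main(void)
{
	struct group *head = NULL, *prev = NULL, *sg;
	int n;

	for (n = 0; n < 4; n++) {
		sg = malloc(sizeof(*sg));
		if (!sg)
			return 1;          /* mirrors the -ENOMEM bail-out */
		sg->node = n;
		if (!head) {
			sg->next = sg;     /* first group: a ring of one */
			head = sg;
		} else {
			sg->next = prev->next;  /* splice in after prev */
			prev->next = sg;
		}
		prev = sg;
	}

	/* Walk the ring once, as init_numa_sched_groups_power() does. */
	sg = head;
	do {
		printf("group for node %d\n", sg->node);
		sg = sg->next;
	} while (sg != head);
	return 0;
}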
@@ -8652,70 +8717,9 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	if (d.sd_allnodes)
 		build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
 
-	for (i = 0; i < nr_node_ids; i++) {
-		/* Set up node groups */
-		struct sched_group *sg, *prev;
-		int j;
-
-		cpumask_clear(d.covered);
-		cpumask_and(d.nodemask, cpumask_of_node(i), cpu_map);
-		if (cpumask_empty(d.nodemask)) {
-			d.sched_group_nodes[i] = NULL;
-			continue;
-		}
-
-		sched_domain_node_span(i, d.domainspan);
-		cpumask_and(d.domainspan, d.domainspan, cpu_map);
-
-		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
-				  GFP_KERNEL, i);
-		if (!sg) {
-			printk(KERN_WARNING "Can not alloc domain group for "
-			       "node %d\n", i);
+	for (i = 0; i < nr_node_ids; i++)
+		if (build_numa_sched_groups(&d, cpu_map, i))
 			goto error;
-		}
-		d.sched_group_nodes[i] = sg;
-		for_each_cpu(j, d.nodemask) {
-			struct sched_domain *sd;
-
-			sd = &per_cpu(node_domains, j).sd;
-			sd->groups = sg;
-		}
-		sg->__cpu_power = 0;
-		cpumask_copy(sched_group_cpus(sg), d.nodemask);
-		sg->next = sg;
-		cpumask_or(d.covered, d.covered, d.nodemask);
-		prev = sg;
-
-		for (j = 0; j < nr_node_ids; j++) {
-			int n = (i + j) % nr_node_ids;
-
-			cpumask_complement(d.notcovered, d.covered);
-			cpumask_and(d.tmpmask, d.notcovered, cpu_map);
-			cpumask_and(d.tmpmask, d.tmpmask, d.domainspan);
-			if (cpumask_empty(d.tmpmask))
-				break;
-
-			cpumask_and(d.tmpmask, d.tmpmask, cpumask_of_node(n));
-			if (cpumask_empty(d.tmpmask))
-				continue;
-
-			sg = kmalloc_node(sizeof(struct sched_group) +
-					  cpumask_size(),
-					  GFP_KERNEL, i);
-			if (!sg) {
-				printk(KERN_WARNING
-				       "Can not alloc domain group for node %d\n", j);
-				goto error;
-			}
-			sg->__cpu_power = 0;
-			cpumask_copy(sched_group_cpus(sg), d.tmpmask);
-			sg->next = prev->next;
-			cpumask_or(d.covered, d.covered, d.tmpmask);
-			prev->next = sg;
-			prev = sg;
-		}
-	}
 #endif
 
 	/* Calculate CPU power for physical packages and nodes */
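[Editor's note] A detail that is easy to miss in both the old and new versions: the inner loop does not scan nodes in plain 0..nr_node_ids-1 order. It starts at the group's home node and wraps around, n = (num + j) % nr_node_ids, and the "covered" mask ensures each remaining part of the domain span is claimed exactly once. A tiny standalone sketch of that index arithmetic, with made-up example values (nr_node_ids = 4, num = 2), not kernel code:

#include <stdio.h>

int main(void)
{
	const int nr_node_ids = 4, num = 2;
	int j;

	/* Visit order used by the inner loop of build_numa_sched_groups():
	 * start at the home node and wrap around all node ids once. */
	for (j = 0; j < nr_node_ids; j++)
		printf("j=%d -> node %d\n", j, (num + j) % nr_node_ids);
	/* prints nodes 2, 3, 0, 1 */
	return 0;
}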