[PATCH] Dynamic sched domains: ia64 changes
ia64 changes similar to kernel/sched.c.

Signed-off-by: Dinakar Guniguntala <dino@in.ibm.com>
Acked-by: Paul Jackson <pj@sgi.com>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 85d7b94981
commit 7f1867a5b3

1 changed file with 45 additions and 31 deletions
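The heart of the change, visible in the hunks below, is the split of the old arch_init_sched_domains() into build_sched_domains(), which builds domains over any caller-supplied cpu mask, and a thin arch_init_sched_domains() wrapper that first strips isolated cpus. A minimal caller sketch follows; rebuild_domains() is a hypothetical helper invented here for illustration (only the two arch_* entry points and their const cpumask_t * signatures come from this patch):

/* Hypothetical illustration only -- not part of this patch. */
static void rebuild_domains(const cpumask_t *old_map, const cpumask_t *new_map)
{
	/* Detach and free the domains/groups built over the old cpu set. */
	arch_destroy_sched_domains(old_map);

	/* Build and attach fresh domains spanning the new cpu set;
	 * isolated cpus are filtered out inside the wrapper. */
	arch_init_sched_domains(new_map);
}

This is the shape of use the dynamic sched domains work enables: domains can be torn down and rebuilt at run time over a subset of cpus, rather than only once at boot over cpu_online_map.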
@@ -27,7 +27,7 @@
  *
  * Should use nodemask_t.
  */
-static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, unsigned long *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -66,7 +66,7 @@ static int __devinit find_next_best_node(int node, unsigned long *used_nodes)
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-static cpumask_t __devinit sched_domain_node_span(int node)
+static cpumask_t sched_domain_node_span(int node)
 {
 	int i;
 	cpumask_t span, nodemask;
@@ -96,7 +96,7 @@ static cpumask_t __devinit sched_domain_node_span(int node)
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static struct sched_group sched_group_cpus[NR_CPUS];
-static int __devinit cpu_to_cpu_group(int cpu)
+static int cpu_to_cpu_group(int cpu)
 {
 	return cpu;
 }
@@ -104,7 +104,7 @@ static int __devinit cpu_to_cpu_group(int cpu)
 
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static struct sched_group sched_group_phys[NR_CPUS];
-static int __devinit cpu_to_phys_group(int cpu)
+static int cpu_to_phys_group(int cpu)
 {
 #ifdef CONFIG_SCHED_SMT
 	return first_cpu(cpu_sibling_map[cpu]);
@@ -125,44 +125,36 @@ static struct sched_group *sched_group_nodes[MAX_NUMNODES];
 static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
 static struct sched_group sched_group_allnodes[MAX_NUMNODES];
 
-static int __devinit cpu_to_allnodes_group(int cpu)
+static int cpu_to_allnodes_group(int cpu)
 {
 	return cpu_to_node(cpu);
 }
 #endif
 
 /*
- * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ * Build sched domains for a given set of cpus and attach the sched domains
+ * to the individual cpus
  */
-void __devinit arch_init_sched_domains(void)
+void build_sched_domains(const cpumask_t *cpu_map)
 {
 	int i;
-	cpumask_t cpu_default_map;
 
 	/*
-	 * Setup mask for cpus without special case scheduling requirements.
-	 * For now this just excludes isolated cpus, but could be used to
-	 * exclude other special cases in the future.
+	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	cpus_complement(cpu_default_map, cpu_isolated_map);
-	cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);
-
-	/*
-	 * Set up domains. Isolated domains just stay on the dummy domain.
-	 */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int group;
 		struct sched_domain *sd = NULL, *p;
 		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 
 #ifdef CONFIG_NUMA
 		if (num_online_cpus()
 				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
 			sd = &per_cpu(allnodes_domains, i);
 			*sd = SD_ALLNODES_INIT;
-			sd->span = cpu_default_map;
+			sd->span = *cpu_map;
 			group = cpu_to_allnodes_group(i);
 			sd->groups = &sched_group_allnodes[group];
 			p = sd;
@@ -173,7 +165,7 @@ void __devinit arch_init_sched_domains(void)
 		*sd = SD_NODE_INIT;
 		sd->span = sched_domain_node_span(cpu_to_node(i));
 		sd->parent = p;
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 #endif
 
 		p = sd;
@@ -190,7 +182,7 @@ void __devinit arch_init_sched_domains(void)
 		group = cpu_to_cpu_group(i);
 		*sd = SD_SIBLING_INIT;
 		sd->span = cpu_sibling_map[i];
-		cpus_and(sd->span, sd->span, cpu_default_map);
+		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		sd->groups = &sched_group_cpus[group];
 #endif
@@ -198,9 +190,9 @@ void __devinit arch_init_sched_domains(void)
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		cpumask_t this_sibling_map = cpu_sibling_map[i];
-		cpus_and(this_sibling_map, this_sibling_map, cpu_default_map);
+		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
 
@@ -213,7 +205,7 @@ void __devinit arch_init_sched_domains(void)
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		cpumask_t nodemask = node_to_cpumask(i);
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
@@ -222,7 +214,7 @@ void __devinit arch_init_sched_domains(void)
 	}
 
 #ifdef CONFIG_NUMA
-	init_sched_build_groups(sched_group_allnodes, cpu_default_map,
+	init_sched_build_groups(sched_group_allnodes, *cpu_map,
 				&cpu_to_allnodes_group);
 
 	for (i = 0; i < MAX_NUMNODES; i++) {
@@ -233,12 +225,12 @@ void __devinit arch_init_sched_domains(void)
 		cpumask_t covered = CPU_MASK_NONE;
 		int j;
 
-		cpus_and(nodemask, nodemask, cpu_default_map);
+		cpus_and(nodemask, nodemask, *cpu_map);
 		if (cpus_empty(nodemask))
 			continue;
 
 		domainspan = sched_domain_node_span(i);
-		cpus_and(domainspan, domainspan, cpu_default_map);
+		cpus_and(domainspan, domainspan, *cpu_map);
 
 		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
 		sched_group_nodes[i] = sg;
@@ -266,7 +258,7 @@ void __devinit arch_init_sched_domains(void)
 			int n = (i + j) % MAX_NUMNODES;
 
 			cpus_complement(notcovered, covered);
-			cpus_and(tmp, notcovered, cpu_default_map);
+			cpus_and(tmp, notcovered, *cpu_map);
 			cpus_and(tmp, tmp, domainspan);
 			if (cpus_empty(tmp))
 				break;
@@ -293,7 +285,7 @@ void __devinit arch_init_sched_domains(void)
 #endif
 
 	/* Calculate CPU power for physical packages and nodes */
-	for_each_cpu_mask(i, cpu_default_map) {
+	for_each_cpu_mask(i, *cpu_map) {
 		int power;
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
@@ -359,13 +351,35 @@ next_sg:
 		cpu_attach_domain(sd, i);
 	}
 }
+/*
+ * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
+ */
+void arch_init_sched_domains(const cpumask_t *cpu_map)
+{
+	cpumask_t cpu_default_map;
+
+	/*
+	 * Setup mask for cpus without special case scheduling requirements.
+	 * For now this just excludes isolated cpus, but could be used to
+	 * exclude other special cases in the future.
+	 */
+	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
+
+	build_sched_domains(&cpu_default_map);
+}
 
-void __devinit arch_destroy_sched_domains(void)
+void arch_destroy_sched_domains(const cpumask_t *cpu_map)
 {
 #ifdef CONFIG_NUMA
 	int i;
+
 	for (i = 0; i < MAX_NUMNODES; i++) {
+		cpumask_t nodemask = node_to_cpumask(i);
 		struct sched_group *oldsg, *sg = sched_group_nodes[i];
 
+		cpus_and(nodemask, nodemask, *cpu_map);
+		if (cpus_empty(nodemask))
+			continue;
+
 		if (sg == NULL)
 			continue;
 		sg = sg->next;
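A note on the cpumask helpers in the final hunk, for readers comparing old and new: cpus_andnot(dst, a, b) computes dst = a & ~b, so the new wrapper derives the non-isolated set in one step from the caller's map, where the old code needed cpus_complement() followed by cpus_and() against cpu_online_map. A side-by-side sketch, illustrative only and mirroring lines from the diff above:

cpumask_t cpu_default_map;

/* Old: every online cpu that is not isolated. */
cpus_complement(cpu_default_map, cpu_isolated_map);		/* ~isolated */
cpus_and(cpu_default_map, cpu_default_map, cpu_online_map);	/* & online  */

/* New: every cpu in the caller's map that is not isolated;
 * which cpus are eligible at all is now the caller's decision. */
cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);	/* *cpu_map & ~isolated */
build_sched_domains(&cpu_default_map);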