Safer nr_cpu_ids and nr_node_ids determination and initial values
The nr_cpu_ids value is currently only calculated in smp_init(). However, it may be needed before that (SLUB needs it in kmem_cache_init!), and other kernel components may also want to allocate dynamically sized per-cpu arrays before smp_init(). So move the determination of the possible CPUs into sched_init(), where we already loop over all possible CPUs early in boot.

Also initialize both nr_node_ids and nr_cpu_ids with the highest value they could take. If we have accidental users before these values are determined, the current value of 0 may cause too-small per-cpu and per-node arrays to be allocated. If they are set to the maximum possible value, we only waste some memory for early boot users.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
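To illustrate the scheme the message describes (this sketch is not part of the patch): a minimal userspace C model of deriving nr_cpu_ids as "highest possible CPU + 1", and of why starting it at the compile-time maximum keeps a hypothetical early-boot allocation from being sized to zero. The example mask, the determine_nr_cpu_ids() helper, and the early array are illustrative assumptions, not kernel code.

/*
 * Userspace sketch (not kernel code) of the scheme described above.
 * Names and the example possible-CPU mask are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 64			/* stands in for the kernel's compile-time NR_CPUS */

static unsigned long cpu_possible_mask = 0x17UL;	/* CPUs 0,1,2,4 possible (example) */
static int nr_cpu_ids = NR_CPUS;	/* conservative initial value, as the patch does */

/* Models the loop moved into sched_init(): highest possible CPU + 1. */
static void determine_nr_cpu_ids(void)
{
	int cpu, highest = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_possible_mask & (1UL << cpu))
			highest = cpu;
	nr_cpu_ids = highest + 1;
}

int main(void)
{
	/*
	 * An "early boot" user sizing a per-cpu array before the value is known:
	 * with an initial value of 0 this would allocate nothing; with NR_CPUS
	 * it merely over-allocates a little.
	 */
	long *early_array = calloc(nr_cpu_ids, sizeof(*early_array));

	printf("early allocation sized for %d cpus\n", nr_cpu_ids);

	determine_nr_cpu_ids();
	printf("after determination: nr_cpu_ids = %d\n", nr_cpu_ids);	/* 5 for mask 0x17 */

	free(early_array);
	return 0;
}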
commit 476f35348e
parent aee16b3cee
4 changed files with 9 additions and 9 deletions
@@ -384,11 +384,6 @@ static void __init setup_per_cpu_areas(void)
 static void __init smp_init(void)
 {
 	unsigned int cpu;
-	unsigned highest = 0;
-
-	for_each_cpu_mask(cpu, cpu_possible_map)
-		highest = cpu;
-	nr_cpu_ids = highest + 1;
 
 	/* FIXME: This should be done in userspace --RR */
 	for_each_present_cpu(cpu) {
@@ -5244,6 +5244,11 @@ int __init migration_init(void)
 #endif
 
 #ifdef CONFIG_SMP
+
+/* Number of possible processor ids */
+int nr_cpu_ids __read_mostly = NR_CPUS;
+EXPORT_SYMBOL(nr_cpu_ids);
+
 #undef SCHED_DOMAIN_DEBUG
 #ifdef SCHED_DOMAIN_DEBUG
 static void sched_domain_debug(struct sched_domain *sd, int cpu)
@@ -6726,6 +6731,7 @@ int in_sched_functions(unsigned long addr)
 void __init sched_init(void)
 {
 	int i, j, k;
+	int highest_cpu = 0;
 
 	for_each_possible_cpu(i) {
 		struct prio_array *array;
@@ -6760,11 +6766,13 @@ void __init sched_init(void)
 			// delimiter for bitsearch
 			__set_bit(MAX_PRIO, array->bitmap);
 		}
+		highest_cpu = i;
 	}
 
 	set_load_weight(&init_task);
 
 #ifdef CONFIG_SMP
+	nr_cpu_ids = highest_cpu + 1;
 	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
 #endif
 
@@ -15,9 +15,6 @@ int __next_cpu(int n, const cpumask_t *srcp)
 }
 EXPORT_SYMBOL(__next_cpu);
 
-int nr_cpu_ids;
-EXPORT_SYMBOL(nr_cpu_ids);
-
 int __any_online_cpu(const cpumask_t *mask)
 {
 	int cpu;
@@ -665,7 +665,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 }
 
 #if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly;
+int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 
 /*