[PATCH] x86-64: Account for module percpu space separately from kernel percpu

Rather than using a single constant PERCPU_ENOUGH_ROOM, compute it as
the sum of kernel_percpu + PERCPU_MODULE_RESERVE.  This is now common
to all architectures; if an architecture wants to set
PERCPU_ENOUGH_ROOM to something special, then it may do so (ia64 is
the only one which does).
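
For illustration, a minimal sketch of the resulting arrangement: the generic
fallback in <linux/percpu.h> sizes the area as the static kernel percpu data
plus a module reserve, and an architecture that needs a special size can
still pre-define PERCPU_ENOUGH_ROOM itself.  The fixed 64K figure below is
only a hypothetical example, not part of this patch:

	/* asm-<arch>/percpu.h: hypothetical arch override, defined before
	 * the generic fallback is seen (ia64 keeps its own value). */
	#define PERCPU_ENOUGH_ROOM	(64 * 1024)

	/* linux/percpu.h: generic fallback used by everyone else. */
	#ifndef PERCPU_ENOUGH_ROOM
	#ifdef CONFIG_MODULES
	#define PERCPU_MODULE_RESERVE	8192
	#else
	#define PERCPU_MODULE_RESERVE	0
	#endif

	#define PERCPU_ENOUGH_ROOM \
		(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
	#endif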

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Andi Kleen <ak@suse.de>
Commit:    b00742d399 (parent bbba11c35b)
Author:    Jeremy Fitzhardinge, 2007-05-02 19:27:11 +02:00
Committer: Andi Kleen
5 changed files with 9 additions and 36 deletions


@@ -1,20 +1,6 @@
 #ifndef __ALPHA_PERCPU_H
 #define __ALPHA_PERCPU_H
 
-/*
- * Increase the per cpu area for Alpha so that
- * modules using percpu area can load.
- */
-#ifdef CONFIG_MODULES
-# define PERCPU_MODULE_RESERVE 8192
-#else
-# define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 #include <asm-generic/percpu.h>
 
 #endif /* __ALPHA_PERCPU_H */


@@ -5,16 +5,6 @@
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_MODULES
-# define PERCPU_MODULE_RESERVE 8192
-#else
-# define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 extern void setup_per_cpu_areas(void);
 
 extern unsigned long __per_cpu_base;


@@ -11,16 +11,6 @@
 #include <asm/pda.h>
 
-#ifdef CONFIG_MODULES
-# define PERCPU_MODULE_RESERVE 8192
-#else
-# define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-	 PERCPU_MODULE_RESERVE)
-
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)


@@ -11,9 +11,16 @@
 
 /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
 #ifndef PERCPU_ENOUGH_ROOM
-#define PERCPU_ENOUGH_ROOM 32768
+#ifdef CONFIG_MODULES
+#define PERCPU_MODULE_RESERVE	8192
+#else
+#define PERCPU_MODULE_RESERVE	0
 #endif
+
+#define PERCPU_ENOUGH_ROOM \
+	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
+#endif	/* PERCPU_ENOUGH_ROOM */
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
  * we force a syntax error here if it isn't.


@@ -430,7 +430,7 @@ static int percpu_modinit(void)
 	pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
 			    GFP_KERNEL);
 	/* Static in-kernel percpu data (used). */
-	pcpu_size[0] = -ALIGN(__per_cpu_end-__per_cpu_start, SMP_CACHE_BYTES);
+	pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
 	/* Free room. */
 	pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
 	if (pcpu_size[1] < 0) {
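
A quick worked check of the bookkeeping above, assuming the generic
PERCPU_ENOUGH_ROOM definition (an architecture with its own value, such as
ia64, simply gets whatever is left over as free room):

	/* Static in-kernel percpu data, recorded as used (hence negative): */
	pcpu_size[0] = -(__per_cpu_end - __per_cpu_start);

	/* Free room for modules:
	 *   PERCPU_ENOUGH_ROOM + pcpu_size[0]
	 * = (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
	 *     - (__per_cpu_end - __per_cpu_start)
	 * = PERCPU_MODULE_RESERVE
	 */
	pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];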