Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 03:06:10 +00:00)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
	arch/x86/kvm/svm.c
	mm/slab.c
commit d0316554d3
79 changed files with 1222 additions and 978 deletions
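Most of the hunks below rename per-cpu variables whose generic names (cpu_info, iic, dtl, runstate, ...) collide with local variables or other globals. A minimal, self-contained sketch of why this matters follows; it is not code from the patch, and the identifiers in it are made up for illustration. Under the old per_cpu__ name mangling a same-named local was harmless, but once accessors name the per-cpu symbol directly, the local shadows it and the access silently goes to the wrong object.

/* sketch.c - illustrative only; the real kernel accessors also add
 * per-CPU section placement and segment-relative addressing. */
#include <stdio.h>

/* old scheme: the accessor pastes a prefix, so a local variable with
 * the same name as the per-cpu variable cannot shadow it */
#define DEFINE_PER_CPU_OLD(type, name)	type per_cpu__##name
#define get_cpu_var_old(name)		(per_cpu__##name)

/* new scheme: the accessor names the symbol directly, so generic
 * per-cpu names like "iic" or "dtl" must become cpu_iic, cpu_dtl, ... */
#define DEFINE_PER_CPU_NEW(type, name)	type name
#define get_cpu_var_new(name)		(name)

DEFINE_PER_CPU_OLD(int, irq_stat);	/* expands to per_cpu__irq_stat */
DEFINE_PER_CPU_NEW(int, cpu_irq_stat);	/* unique name, cannot be shadowed */

int main(void)
{
	int irq_stat = 0;			/* harmless under the old scheme */

	get_cpu_var_old(irq_stat) = 7;		/* still reaches the global */
	get_cpu_var_new(cpu_irq_stat) = 7;	/* unambiguous */
	/* get_cpu_var_new(irq_stat) would silently hit the local instead */
	printf("%d %d\n", irq_stat, get_cpu_var_new(cpu_irq_stat));
	return 0;
}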
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
-extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 extern struct page *vmem_map;
 extern int find_largest_hole(u64 start, u64 end, void *arg);
 extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END		vmalloc_end
-extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
 #endif
 };
 
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 /*
  * The "local" data variable.  It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
+#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
 		printk(KERN_ERR PREFIX
 		       "Error parsing MADT - no LAPIC entries\n");
 
+#ifdef CONFIG_SMP
+	if (available_cpus == 0) {
+		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+		smp_boot_data.cpu_phys_id[available_cpus] =
+		    hard_smp_processor_id();
+		available_cpus = 1;	/* We've got at least one of these, no? */
+	}
+	smp_boot_data.cpu_count = available_cpus;
+#endif
+	/* Make boot-up look pretty */
+	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+	       total_cpus);
+
 	return 0;
 }
 
-
-
 int __init acpi_boot_init(void)
 {
 
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
 	if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
 		printk(KERN_ERR PREFIX "Can't find FADT\n");
 
-#ifdef CONFIG_SMP
-	if (available_cpus == 0) {
-		printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
-		printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
-		smp_boot_data.cpu_phys_id[available_cpus] =
-		    hard_smp_processor_id();
-		available_cpus = 1;	/* We've got at least one of these, no? */
-	}
-	smp_boot_data.cpu_count = available_cpus;
-
-	smp_build_cpu_map();
 #ifdef CONFIG_ACPI_NUMA
+#ifdef CONFIG_SMP
 	if (srat_num_cpus == 0) {
 		int cpu, i = 1;
 		for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -790,13 +792,8 @@ int __init acpi_boot_init(void)
 				smp_boot_data.cpu_phys_id[cpu];
 	}
 #endif
-#endif
-#ifdef CONFIG_ACPI_NUMA
 	build_cpu_to_node_map();
 #endif
-	/* Make boot-up look pretty */
-	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
-	       total_cpus);
 	return 0;
 }
 
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
  * intermediate precision so that we can produce a full 64-bit result.
  */
 GLOBAL_ENTRY(ia64_native_sched_clock)
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter  (35 cyc)
 	;;
 	ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
-	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
 	ldf8 f8=[r8]
 	;;
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic
 #endif
 
 #include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
 #endif
@@ -59,7 +59,7 @@
 ia64_do_tlb_purge:
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
 
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 
 	// purge all TC entries
 #define O(member)	IA64_CPUINFO_##member##_OFFSET
-	GET_THIS_PADDR(r2, cpu_info)	// load phys addr of cpu_info into r2
+	GET_THIS_PADDR(r2, ia64_cpu_info)	// load phys addr of cpu_info into r2
 	;;
 	addl r17=O(PTCE_STRIDE),r2
 	addl r2=O(PTCE_BASE),r2
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
 EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
@@ -573,12 +573,11 @@ setup_arch (char **cmdline_p)
 	       32 : cpus_weight(early_cpu_possible_map)),
 	       additional_cpus > 0 ? additional_cpus : 0);
 # endif
-#else
-# ifdef CONFIG_SMP
-	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
-# endif
 #endif /* CONFIG_APCI_BOOT */
 
+#ifdef CONFIG_SMP
+	smp_build_cpu_map();
+#endif
 	find_memory();
 
 	/* process SAL system table: */
@@ -855,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 }
 
-/*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
-	/* start_kernel() requires this... */
-}
-#endif
-
 /*
  * Do the following calculations:
  *
@@ -980,7 +967,7 @@ cpu_init (void)
 	 * depends on the data returned by identify_cpu().  We break the dependency by
 	 * accessing cpu_data() through the canonical per-CPU address.
 	 */
-	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
 	identify_cpu(cpu_info);
 
 #ifdef CONFIG_MCKINLEY
@@ -166,6 +166,12 @@ SECTIONS
 	}
 #endif
 
+#ifdef	CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
+
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
@@ -198,11 +204,6 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
 	{
-#ifdef	CONFIG_SMP
-  . = ALIGN(PERCPU_PAGE_SIZE);
-  __cpu0_per_cpu = .;
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
 		INIT_TASK_DATA(PAGE_SIZE)
 		CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
 		READ_MOSTLY_DATA(SMP_CACHE_BYTES)
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;
+
+	if (!first_time)
+		goto skip;
+	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for_each_possible_cpu(cpu) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
-
-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move is ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
 		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 			sizeof(struct page));
-		vmalloc_end -= map_size;
-		vmem_map = (struct page *) vmalloc_end;
+		VMALLOC_END -= map_size;
+		vmem_map = (struct page *) VMALLOC_END;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
 
 		/*
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-				__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+			__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move is ensures that percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size		= static_size;
+	ai->reserved_size	= reserved_size;
+	ai->dyn_size		= dyn_size;
+	ai->unit_size		= PERCPU_PAGE_SIZE;
+	ai->atom_size		= PAGE_SIZE;
+	ai->alloc_size		= PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units		= 1;
+		gi->base_offset		= __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map		= &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
 	/* Set the node_data pointer for each per-cpu struct */
 	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
-		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+		per_cpu(ia64_cpu_info, cpu).node_data =
+			mem_data[node].node_data;
 	}
 #else
 	{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__cpu_info - __per_cpu_start));
+			((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
 	sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmem_map = (struct page *) vmalloc_end;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 				stat->deadlocks,
-				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
 				stat->shub_ptc_flushes_not_my_mm,
 				stat->deadlocks2,
 				stat->shub_ipi_flushes,
-				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
 }
 
 
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
 #define NAME_SIZE	15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
 #undef NAME_SIZE
 
 struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 	if (xen_slab_ready) {
 		switch (vec) {
 		case IA64_TIMER_VECTOR:
-			snprintf(per_cpu(timer_name, cpu),
-				 sizeof(per_cpu(timer_name, cpu)),
+			snprintf(per_cpu(xen_timer_name, cpu),
+				 sizeof(per_cpu(xen_timer_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
 				action->handler, action->flags,
-				per_cpu(timer_name, cpu), action->dev_id);
-			per_cpu(timer_irq, cpu) = irq;
+				per_cpu(xen_timer_name, cpu), action->dev_id);
+			per_cpu(xen_timer_irq, cpu) = irq;
 			break;
 		case IA64_IPI_RESCHEDULE:
-			snprintf(per_cpu(resched_name, cpu),
-				 sizeof(per_cpu(resched_name, cpu)),
+			snprintf(per_cpu(xen_resched_name, cpu),
+				 sizeof(per_cpu(xen_resched_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(resched_name, cpu), action->dev_id);
-			per_cpu(resched_irq, cpu) = irq;
+				per_cpu(xen_resched_name, cpu), action->dev_id);
+			per_cpu(xen_resched_irq, cpu) = irq;
 			break;
 		case IA64_IPI_VECTOR:
-			snprintf(per_cpu(ipi_name, cpu),
-				 sizeof(per_cpu(ipi_name, cpu)),
+			snprintf(per_cpu(xen_ipi_name, cpu),
+				 sizeof(per_cpu(xen_ipi_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(ipi_name, cpu), action->dev_id);
-			per_cpu(ipi_irq, cpu) = irq;
+				per_cpu(xen_ipi_name, cpu), action->dev_id);
+			per_cpu(xen_ipi_irq, cpu) = irq;
 			break;
 		case IA64_CMC_VECTOR:
-			snprintf(per_cpu(cmc_name, cpu),
-				 sizeof(per_cpu(cmc_name, cpu)),
+			snprintf(per_cpu(xen_cmc_name, cpu),
+				 sizeof(per_cpu(xen_cmc_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
 						      action->handler,
 						      action->flags,
-						      per_cpu(cmc_name, cpu),
+						      per_cpu(xen_cmc_name, cpu),
 						      action->dev_id);
-			per_cpu(cmc_irq, cpu) = irq;
+			per_cpu(xen_cmc_irq, cpu) = irq;
 			break;
 		case IA64_CMCP_VECTOR:
-			snprintf(per_cpu(cmcp_name, cpu),
-				 sizeof(per_cpu(cmcp_name, cpu)),
+			snprintf(per_cpu(xen_cmcp_name, cpu),
+				 sizeof(per_cpu(xen_cmcp_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cmcp_name, cpu),
+						     per_cpu(xen_cmcp_name, cpu),
 						     action->dev_id);
-			per_cpu(cmcp_irq, cpu) = irq;
+			per_cpu(xen_cmcp_irq, cpu) = irq;
 			break;
 		case IA64_CPEP_VECTOR:
-			snprintf(per_cpu(cpep_name, cpu),
-				 sizeof(per_cpu(cpep_name, cpu)),
+			snprintf(per_cpu(xen_cpep_name, cpu),
+				 sizeof(per_cpu(xen_cpep_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cpep_name, cpu),
+						     per_cpu(xen_cpep_name, cpu),
 						     action->dev_id);
-			per_cpu(cpep_irq, cpu) = irq;
+			per_cpu(xen_cpep_irq, cpu) = irq;
 			break;
 		case IA64_CPE_VECTOR:
 		case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
 
 	if (action == CPU_DEAD) {
 		/* Unregister evtchn.  */
-		if (per_cpu(cpep_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
-			per_cpu(cpep_irq, cpu) = -1;
+		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+					       NULL);
+			per_cpu(xen_cpep_irq, cpu) = -1;
 		}
-		if (per_cpu(cmcp_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
-			per_cpu(cmcp_irq, cpu) = -1;
+		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+					       NULL);
+			per_cpu(xen_cmcp_irq, cpu) = -1;
 		}
-		if (per_cpu(cmc_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
-			per_cpu(cmc_irq, cpu) = -1;
+		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+			per_cpu(xen_cmc_irq, cpu) = -1;
 		}
-		if (per_cpu(ipi_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
-			per_cpu(ipi_irq, cpu) = -1;
+		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+			per_cpu(xen_ipi_irq, cpu) = -1;
 		}
-		if (per_cpu(resched_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
-						NULL);
-			per_cpu(resched_irq, cpu) = -1;
+		if (per_cpu(xen_resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
+					       NULL);
+			per_cpu(xen_resched_irq, cpu) = -1;
 		}
-		if (per_cpu(timer_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-			per_cpu(timer_irq, cpu) = -1;
+		if (per_cpu(xen_timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+					       NULL);
+			per_cpu(xen_timer_irq, cpu) = -1;
 		}
 	}
 	return NOTIFY_OK;
@@ -34,15 +34,15 @@
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
-	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+	struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
 	int rc;
 
 	memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
 			       &area);
 	WARN_ON(rc && rc != -ENOSYS);
 
-	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+	per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+	per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
 					    + runstate->time[RUNSTATE_offline];
 }
 
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
 	 * This function just checks and reject this effect.
 	 */
 	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
-			   per_cpu(processed_blocked_time, cpu)))
+			   per_cpu(xen_blocked_time, cpu)))
 		blocked = 0;
 
 	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
 			   runstate.time[RUNSTATE_offline],
-			   per_cpu(processed_stolen_time, cpu)))
+			   per_cpu(xen_stolen_time, cpu)))
 		stolen = 0;
 
 	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
 		} else {
 			local_cpu_data->itm_next = delta_itm + new_itm;
 		}
-		per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
-		per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
 	return delta_itm;
 }
@@ -83,9 +83,9 @@
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
 #else
-extern unsigned long vmalloc_end;
+extern unsigned long m68k_vmalloc_end;
 #define VMALLOC_START 0x0f800000
-#define VMALLOC_END vmalloc_end
+#define VMALLOC_END m68k_vmalloc_end
 #endif /* CONFIG_SUN3 */
 
 /* zero page used for uninitialized stuff */
@@ -45,8 +45,8 @@
 ** Globals
 */
 
-unsigned long vmalloc_end;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long m68k_vmalloc_end;
+EXPORT_SYMBOL(m68k_vmalloc_end);
 
 unsigned long pmeg_vaddr[PMEGS_NUM];
 unsigned char pmeg_alloc[PMEGS_NUM];
@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
 #endif
 			// the lowest mapping here is the end of our
 			// vmalloc region
-			if(!vmalloc_end)
-				vmalloc_end = seg;
+			if (!m68k_vmalloc_end)
+				m68k_vmalloc_end = seg;
 
 			// mark the segmap alloc'd, and reserve any
 			// of the first 0xbff pages the hardware is
@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 #define KPROBE_HIT_ACTIVE	0x00000001
 #define KPROBE_HIT_SS		0x00000002
 
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_pc;
-static unsigned long current_kprobe_next_pc;
-static int current_kprobe_ss_flags;
+static struct kprobe *cur_kprobe;
+static unsigned long cur_kprobe_orig_pc;
+static unsigned long cur_kprobe_next_pc;
+static int cur_kprobe_ss_flags;
 static unsigned long kprobe_status;
-static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
-static unsigned long current_kprobe_bp_addr;
+static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
+static unsigned long cur_kprobe_bp_addr;
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 
@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long nextpc;
 
-	current_kprobe_orig_pc = regs->pc;
-	memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
-	regs->pc = (unsigned long) current_kprobe_ss_buf;
+	cur_kprobe_orig_pc = regs->pc;
+	memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
+	regs->pc = (unsigned long) cur_kprobe_ss_buf;
 
-	nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
-	if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
-		current_kprobe_next_pc =
-			current_kprobe_orig_pc + (nextpc - regs->pc);
+	nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
+	if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
+		cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
 	else
-		current_kprobe_next_pc = nextpc;
+		cur_kprobe_next_pc = nextpc;
 
 	/* branching instructions need special handling */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
 		nextpc = singlestep_branch_setup(regs);
 
-	current_kprobe_bp_addr = nextpc;
+	cur_kprobe_bp_addr = nextpc;
 
 	*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
-	mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
-				    sizeof(current_kprobe_ss_buf));
+	mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
+				    sizeof(cur_kprobe_ss_buf));
 	mn10300_icache_inv();
 }
 
@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 			disarm_kprobe(p, regs);
 			ret = 1;
 		} else {
-			p = current_kprobe;
+			p = cur_kprobe;
 			if (p->break_handler && p->break_handler(p, regs))
 				goto ss_probe;
 		}
@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
 	}
 
 	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
+	cur_kprobe = p;
 	if (p->pre_handler(p, regs)) {
 		/* handler has already set things up, so skip ss setup */
 		return 1;
@@ -491,8 +490,8 @@ no_kprobe:
 static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	/* we may need to fixup regs/stack after singlestepping a call insn */
-	if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
-		regs->pc = current_kprobe_orig_pc;
+	if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
+		regs->pc = cur_kprobe_orig_pc;
 		switch (p->ainsn.insn[0]) {
 		case 0xcd:	/* CALL (d16,PC) */
 			*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;
@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 		}
 	}
 
-	regs->pc = current_kprobe_next_pc;
-	current_kprobe_bp_addr = 0;
+	regs->pc = cur_kprobe_next_pc;
+	cur_kprobe_bp_addr = 0;
 }
 
 static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	if (!kprobe_running())
 		return 0;
 
-	if (current_kprobe->post_handler)
-		current_kprobe->post_handler(current_kprobe, regs, 0);
+	if (cur_kprobe->post_handler)
+		cur_kprobe->post_handler(cur_kprobe, regs, 0);
 
-	resume_execution(current_kprobe, regs);
+	resume_execution(cur_kprobe, regs);
 	reset_current_kprobe();
 	preempt_enable_no_resched();
 	return 1;
@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
 static inline
 int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-	if (current_kprobe->fault_handler &&
-	    current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+	if (cur_kprobe->fault_handler &&
+	    cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
 		return 1;
 
 	if (kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(current_kprobe, regs);
+		resume_execution(cur_kprobe, regs);
 		reset_current_kprobe();
 		preempt_enable_no_resched();
 	}
@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 
 	switch (val) {
 	case DIE_BREAKPOINT:
-		if (current_kprobe_bp_addr != args->regs->pc) {
+		if (cur_kprobe_bp_addr != args->regs->pc) {
 			if (kprobe_handler(args->regs))
 				return NOTIFY_STOP;
 		} else {
@@ -37,7 +37,7 @@ extern void cpu_die(void);
 extern void smp_send_debugger_break(int cpu);
 extern void smp_message_recv(int);
 
-DECLARE_PER_CPU(unsigned int, pvr);
+DECLARE_PER_CPU(unsigned int, cpu_pvr);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void fixup_irqs(cpumask_t map);
@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
  * Since we can't get PMU interrupts inside a PMU interrupt handler,
  * we don't need separate irq and nmi entries here.
  */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
 
 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 {
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+	struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
 
 	entry->nr = 0;
 
@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
 #endif /* CONFIG_TAU */
 
 #ifdef CONFIG_SMP
-DEFINE_PER_CPU(unsigned int, pvr);
+DEFINE_PER_CPU(unsigned int, cpu_pvr);
 #endif
 
 static int show_cpuinfo(struct seq_file *m, void *v)
@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 #ifdef CONFIG_SMP
-	pvr = per_cpu(pvr, cpu_id);
+	pvr = per_cpu(cpu_pvr, cpu_id);
 #else
 	pvr = mfspr(SPRN_PVR);
 #endif
@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];
 
 static void __devinit smp_store_cpu_info(int id)
 {
-	per_cpu(pvr, id) = mfspr(SPRN_PVR);
+	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
 }
 
 static void __init smp_create_idle(unsigned int cpu)
@@ -54,7 +54,7 @@ struct iic {
 	struct device_node *node;
 };
 
-static DEFINE_PER_CPU(struct iic, iic);
+static DEFINE_PER_CPU(struct iic, cpu_iic);
 #define IIC_NODE_COUNT	2
 static struct irq_host *iic_host;
 
@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq)
 
 static void iic_eoi(unsigned int irq)
 {
-	struct iic *iic = &__get_cpu_var(iic);
+	struct iic *iic = &__get_cpu_var(cpu_iic);
 	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
 	BUG_ON(iic->eoi_ptr < 0);
 }
@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
 	struct iic *iic;
 	unsigned int virq;
 
-	iic = &__get_cpu_var(iic);
+	iic = &__get_cpu_var(cpu_iic);
 	*(unsigned long *) &pending =
 		in_be64((u64 __iomem *) &iic->regs->pending_destr);
 	if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
+	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
 {
-	return per_cpu(iic, cpu).target_id;
+	return per_cpu(cpu_iic, cpu).target_id;
 }
 
 EXPORT_SYMBOL_GPL(iic_get_target_id);
@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)
 
 void iic_cause_IPI(int cpu, int mesg)
 {
-	out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
+	out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
 }
 
 struct irq_host *iic_get_irq_host(int node)
@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
 	/* XXX FIXME: should locate the linux CPU number from the HW cpu
 	 * number properly. We are lucky for now
 	 */
-	struct iic *iic = &per_cpu(iic, hw_cpu);
+	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);
 
 	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
 	BUG_ON(iic->regs == NULL);
@@ -54,7 +54,7 @@ struct dtl {
 	int			buf_entries;
 	u64			last_idx;
 };
-static DEFINE_PER_CPU(struct dtl, dtl);
+static DEFINE_PER_CPU(struct dtl, cpu_dtl);
 
 /*
  * Dispatch trace log event mask:
@@ -261,7 +261,7 @@ static int dtl_init(void)
 
 	/* set up the per-cpu log structures */
 	for_each_possible_cpu(i) {
-		struct dtl *dtl = &per_cpu(dtl, i);
+		struct dtl *dtl = &per_cpu(cpu_dtl, i);
 		dtl->cpu = i;
 
 		rc = dtl_setup_file(dtl);
@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
 static int endflag __initdata;
 
 static DEFINE_PER_CPU(unsigned int, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(long, alert_counter);
 static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 		touched = 1;
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
+		__this_cpu_inc(per_cpu_var(alert_counter));
+		if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		local_set(&__get_cpu_var(alert_counter), 0);
+		__this_cpu_write(per_cpu_var(alert_counter), 0);
 	}
 	if (__get_cpu_var(wd_enabled)) {
 		write_pic(picl_value(nmi_hz));
@@ -74,31 +74,31 @@ extern void __bad_percpu_size(void);
 
 #define percpu_to_op(op, var, val)			\
 do {							\
-	typedef typeof(var) T__;			\
+	typedef typeof(var) pto_T__;			\
 	if (0) {					\
-		T__ tmp__;				\
-		tmp__ = (val);				\
+		pto_T__ pto_tmp__;			\
+		pto_tmp__ = (val);			\
 	}						\
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "qi" ((T__)(val)));		\
+		    : "qi" ((pto_T__)(val)));		\
 		break;					\
 	case 2:						\
 		asm(op "w %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)(val)));		\
+		    : "ri" ((pto_T__)(val)));		\
 		break;					\
 	case 4:						\
 		asm(op "l %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "ri" ((T__)(val)));		\
+		    : "ri" ((pto_T__)(val)));		\
 		break;					\
 	case 8:						\
 		asm(op "q %1,"__percpu_arg(0)		\
 		    : "+m" (var)			\
-		    : "re" ((T__)(val)));		\
+		    : "re" ((pto_T__)(val)));		\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
@@ -106,31 +106,31 @@ do { \
 
 #define percpu_from_op(op, var, constraint)		\
 ({							\
-	typeof(var) ret__;				\
+	typeof(var) pfo_ret__;				\
 	switch (sizeof(var)) {				\
 	case 1:						\
 		asm(op "b "__percpu_arg(1)",%0"		\
-		    : "=q" (ret__)			\
+		    : "=q" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 2:						\
 		asm(op "w "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 4:						\
 		asm(op "l "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	case 8:						\
 		asm(op "q "__percpu_arg(1)",%0"		\
-		    : "=r" (ret__)			\
+		    : "=r" (pfo_ret__)			\
 		    : constraint);			\
 		break;					\
 	default: __bad_percpu_size();			\
 	}						\
-	ret__;						\
+	pfo_ret__;					\
 })
 
 /*
@@ -153,6 +153,84 @@ do { \
 #define percpu_or(var, val)		percpu_to_op("or", per_cpu__##var, val)
 #define percpu_xor(var, val)		percpu_to_op("xor", per_cpu__##var, val)
 
+#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+
+#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define __this_cpu_add_1(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_2(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_4(pcp, val)	percpu_to_op("add", (pcp), val)
+#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
+#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define __this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define __this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
|
||||||
|
#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||||
|
#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||||
|
#define this_cpu_read_4(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||||
|
#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||||
|
#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||||
|
#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||||
|
#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define this_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define this_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define this_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
|
||||||
|
#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Per cpu atomic 64 bit operations are only available under 64 bit.
|
||||||
|
* 32 bit must fall back to generic operations.
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_X86_64
|
||||||
|
#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||||
|
#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||||
|
#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
|
||||||
|
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||||
|
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||||
|
#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
|
||||||
|
#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
|
||||||
|
#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||||
|
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||||
|
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
|
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
|
||||||
#define x86_test_and_clear_bit_percpu(bit, var) \
|
#define x86_test_and_clear_bit_percpu(bit, var) \
|
||||||
({ \
|
({ \
|
||||||
|
|
|
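The percpu_to_op()/percpu_from_op() templates above dispatch on sizeof() to pick a b/w/l/q instruction suffix, and the new __this_cpu_*_N / this_cpu_*_N defines simply route the generic this_cpu API onto those single-instruction forms. A hypothetical caller (demo_packets and demo_account_packet are invented names) therefore needs no get_cpu()/put_cpu() bracket on x86:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_packets);

static void demo_account_packet(unsigned int n)
{
	/* On x86 this compiles to a single segment-prefixed add, so it is
	 * safe against both preemption and interrupts on this architecture. */
	this_cpu_add(per_cpu_var(demo_packets), n);
}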
@ -361,7 +361,7 @@ void stop_apic_nmi_watchdog(void *unused)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
static DEFINE_PER_CPU(unsigned, last_irq_sum);
|
static DEFINE_PER_CPU(unsigned, last_irq_sum);
|
||||||
static DEFINE_PER_CPU(local_t, alert_counter);
|
static DEFINE_PER_CPU(long, alert_counter);
|
||||||
static DEFINE_PER_CPU(int, nmi_touch);
|
static DEFINE_PER_CPU(int, nmi_touch);
|
||||||
|
|
||||||
void touch_nmi_watchdog(void)
|
void touch_nmi_watchdog(void)
|
||||||
|
@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
|
||||||
* Ayiee, looks like this CPU is stuck ...
|
* Ayiee, looks like this CPU is stuck ...
|
||||||
* wait a few IRQs (5 seconds) before doing the oops ...
|
* wait a few IRQs (5 seconds) before doing the oops ...
|
||||||
*/
|
*/
|
||||||
local_inc(&__get_cpu_var(alert_counter));
|
__this_cpu_inc(per_cpu_var(alert_counter));
|
||||||
if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
|
if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
|
||||||
/*
|
/*
|
||||||
* die_nmi will return ONLY if NOTIFY_STOP happens..
|
* die_nmi will return ONLY if NOTIFY_STOP happens..
|
||||||
*/
|
*/
|
||||||
|
@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
|
||||||
regs, panic_on_timeout);
|
regs, panic_on_timeout);
|
||||||
} else {
|
} else {
|
||||||
__get_cpu_var(last_irq_sum) = sum;
|
__get_cpu_var(last_irq_sum) = sum;
|
||||||
local_set(&__get_cpu_var(alert_counter), 0);
|
__this_cpu_write(per_cpu_var(alert_counter), 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* see if the nmi watchdog went off */
|
/* see if the nmi watchdog went off */
|
||||||
|
|
|
@ -1093,7 +1093,7 @@ static void clear_all_debug_regs(void)
|
||||||
|
|
||||||
void __cpuinit cpu_init(void)
|
void __cpuinit cpu_init(void)
|
||||||
{
|
{
|
||||||
struct orig_ist *orig_ist;
|
struct orig_ist *oist;
|
||||||
struct task_struct *me;
|
struct task_struct *me;
|
||||||
struct tss_struct *t;
|
struct tss_struct *t;
|
||||||
unsigned long v;
|
unsigned long v;
|
||||||
|
@ -1102,7 +1102,7 @@ void __cpuinit cpu_init(void)
|
||||||
|
|
||||||
cpu = stack_smp_processor_id();
|
cpu = stack_smp_processor_id();
|
||||||
t = &per_cpu(init_tss, cpu);
|
t = &per_cpu(init_tss, cpu);
|
||||||
orig_ist = &per_cpu(orig_ist, cpu);
|
oist = &per_cpu(orig_ist, cpu);
|
||||||
|
|
||||||
#ifdef CONFIG_NUMA
|
#ifdef CONFIG_NUMA
|
||||||
if (cpu != 0 && percpu_read(node_number) == 0 &&
|
if (cpu != 0 && percpu_read(node_number) == 0 &&
|
||||||
|
@ -1143,12 +1143,12 @@ void __cpuinit cpu_init(void)
|
||||||
/*
|
/*
|
||||||
* set up and load the per-CPU TSS
|
* set up and load the per-CPU TSS
|
||||||
*/
|
*/
|
||||||
if (!orig_ist->ist[0]) {
|
if (!oist->ist[0]) {
|
||||||
char *estacks = per_cpu(exception_stacks, cpu);
|
char *estacks = per_cpu(exception_stacks, cpu);
|
||||||
|
|
||||||
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
|
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
|
||||||
estacks += exception_stack_sizes[v];
|
estacks += exception_stack_sizes[v];
|
||||||
orig_ist->ist[v] = t->x86_tss.ist[v] =
|
oist->ist[v] = t->x86_tss.ist[v] =
|
||||||
(unsigned long)estacks;
|
(unsigned long)estacks;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,9 +30,9 @@
|
||||||
#include <asm/apic.h>
|
#include <asm/apic.h>
|
||||||
#include <asm/desc.h>
|
#include <asm/desc.h>
|
||||||
|
|
||||||
static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
|
static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpud_arr);
|
||||||
static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
|
static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], cpud_priv_arr);
|
||||||
static DEFINE_PER_CPU(int, cpu_priv_count);
|
static DEFINE_PER_CPU(int, cpud_priv_count);
|
||||||
|
|
||||||
static DEFINE_MUTEX(cpu_debug_lock);
|
static DEFINE_MUTEX(cpu_debug_lock);
|
||||||
|
|
||||||
|
@ -531,7 +531,7 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
|
||||||
|
|
||||||
/* Already initialized */
|
/* Already initialized */
|
||||||
if (file == CPU_INDEX_BIT)
|
if (file == CPU_INDEX_BIT)
|
||||||
if (per_cpu(cpu_arr[type].init, cpu))
|
if (per_cpu(cpud_arr[type].init, cpu))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||||
|
@ -543,8 +543,8 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
|
||||||
priv->reg = reg;
|
priv->reg = reg;
|
||||||
priv->file = file;
|
priv->file = file;
|
||||||
mutex_lock(&cpu_debug_lock);
|
mutex_lock(&cpu_debug_lock);
|
||||||
per_cpu(priv_arr[type], cpu) = priv;
|
per_cpu(cpud_priv_arr[type], cpu) = priv;
|
||||||
per_cpu(cpu_priv_count, cpu)++;
|
per_cpu(cpud_priv_count, cpu)++;
|
||||||
mutex_unlock(&cpu_debug_lock);
|
mutex_unlock(&cpu_debug_lock);
|
||||||
|
|
||||||
if (file)
|
if (file)
|
||||||
|
@ -552,10 +552,10 @@ static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
|
||||||
dentry, (void *)priv, &cpu_fops);
|
dentry, (void *)priv, &cpu_fops);
|
||||||
else {
|
else {
|
||||||
debugfs_create_file(cpu_base[type].name, S_IRUGO,
|
debugfs_create_file(cpu_base[type].name, S_IRUGO,
|
||||||
per_cpu(cpu_arr[type].dentry, cpu),
|
per_cpu(cpud_arr[type].dentry, cpu),
|
||||||
(void *)priv, &cpu_fops);
|
(void *)priv, &cpu_fops);
|
||||||
mutex_lock(&cpu_debug_lock);
|
mutex_lock(&cpu_debug_lock);
|
||||||
per_cpu(cpu_arr[type].init, cpu) = 1;
|
per_cpu(cpud_arr[type].init, cpu) = 1;
|
||||||
mutex_unlock(&cpu_debug_lock);
|
mutex_unlock(&cpu_debug_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -615,7 +615,7 @@ static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
|
||||||
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
|
if (!is_typeflag_valid(cpu, cpu_base[type].flag))
|
||||||
continue;
|
continue;
|
||||||
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
|
cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
|
||||||
per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
|
per_cpu(cpud_arr[type].dentry, cpu) = cpu_dentry;
|
||||||
|
|
||||||
if (type < CPU_TSS_BIT)
|
if (type < CPU_TSS_BIT)
|
||||||
err = cpu_init_msr(cpu, type, cpu_dentry);
|
err = cpu_init_msr(cpu, type, cpu_dentry);
|
||||||
|
@ -647,11 +647,11 @@ static int cpu_init_cpu(void)
|
||||||
err = cpu_init_allreg(cpu, cpu_dentry);
|
err = cpu_init_allreg(cpu, cpu_dentry);
|
||||||
|
|
||||||
pr_info("cpu%d(%d) debug files %d\n",
|
pr_info("cpu%d(%d) debug files %d\n",
|
||||||
cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
|
cpu, nr_cpu_ids, per_cpu(cpud_priv_count, cpu));
|
||||||
if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
|
if (per_cpu(cpud_priv_count, cpu) > MAX_CPU_FILES) {
|
||||||
pr_err("Register files count %d exceeds limit %d\n",
|
pr_err("Register files count %d exceeds limit %d\n",
|
||||||
per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
|
per_cpu(cpud_priv_count, cpu), MAX_CPU_FILES);
|
||||||
per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
|
per_cpu(cpud_priv_count, cpu) = MAX_CPU_FILES;
|
||||||
err = -ENFILE;
|
err = -ENFILE;
|
||||||
}
|
}
|
||||||
if (err)
|
if (err)
|
||||||
|
@ -676,8 +676,8 @@ static void __exit cpu_debug_exit(void)
|
||||||
debugfs_remove_recursive(cpu_debugfs_dir);
|
debugfs_remove_recursive(cpu_debugfs_dir);
|
||||||
|
|
||||||
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
|
for (cpu = 0; cpu < nr_cpu_ids; cpu++)
|
||||||
for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
|
for (i = 0; i < per_cpu(cpud_priv_count, cpu); i++)
|
||||||
kfree(per_cpu(priv_arr[i], cpu));
|
kfree(per_cpu(cpud_priv_arr[i], cpu));
|
||||||
}
|
}
|
||||||
|
|
||||||
module_init(cpu_debug_init);
|
module_init(cpu_debug_init);
|
||||||
|
|
|
@ -68,9 +68,9 @@ struct acpi_cpufreq_data {
|
||||||
unsigned int cpu_feature;
|
unsigned int cpu_feature;
|
||||||
};
|
};
|
||||||
|
|
||||||
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
|
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
|
||||||
|
|
||||||
static DEFINE_PER_CPU(struct aperfmperf, old_perf);
|
static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
|
||||||
|
|
||||||
/* acpi_perf_data is a pointer to percpu data. */
|
/* acpi_perf_data is a pointer to percpu data. */
|
||||||
static struct acpi_processor_performance *acpi_perf_data;
|
static struct acpi_processor_performance *acpi_perf_data;
|
||||||
|
@ -214,14 +214,14 @@ static u32 get_cur_val(const struct cpumask *mask)
|
||||||
if (unlikely(cpumask_empty(mask)))
|
if (unlikely(cpumask_empty(mask)))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
|
switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
|
||||||
case SYSTEM_INTEL_MSR_CAPABLE:
|
case SYSTEM_INTEL_MSR_CAPABLE:
|
||||||
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
|
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
|
||||||
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
|
cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
|
||||||
break;
|
break;
|
||||||
case SYSTEM_IO_CAPABLE:
|
case SYSTEM_IO_CAPABLE:
|
||||||
cmd.type = SYSTEM_IO_CAPABLE;
|
cmd.type = SYSTEM_IO_CAPABLE;
|
||||||
perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
|
perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
|
||||||
cmd.addr.io.port = perf->control_register.address;
|
cmd.addr.io.port = perf->control_register.address;
|
||||||
cmd.addr.io.bit_width = perf->control_register.bit_width;
|
cmd.addr.io.bit_width = perf->control_register.bit_width;
|
||||||
break;
|
break;
|
||||||
|
@ -268,8 +268,8 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
|
||||||
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
|
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
|
ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
|
||||||
per_cpu(old_perf, cpu) = perf;
|
per_cpu(acfreq_old_perf, cpu) = perf;
|
||||||
|
|
||||||
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
|
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
|
||||||
|
|
||||||
|
@ -278,7 +278,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
|
||||||
|
|
||||||
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
|
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
|
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
|
||||||
unsigned int freq;
|
unsigned int freq;
|
||||||
unsigned int cached_freq;
|
unsigned int cached_freq;
|
||||||
|
|
||||||
|
@ -322,7 +322,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
|
||||||
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
|
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
|
||||||
unsigned int target_freq, unsigned int relation)
|
unsigned int target_freq, unsigned int relation)
|
||||||
{
|
{
|
||||||
struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
|
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
|
||||||
struct acpi_processor_performance *perf;
|
struct acpi_processor_performance *perf;
|
||||||
struct cpufreq_freqs freqs;
|
struct cpufreq_freqs freqs;
|
||||||
struct drv_cmd cmd;
|
struct drv_cmd cmd;
|
||||||
|
@ -416,7 +416,7 @@ out:
|
||||||
|
|
||||||
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
|
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
|
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
|
||||||
|
|
||||||
dprintk("acpi_cpufreq_verify\n");
|
dprintk("acpi_cpufreq_verify\n");
|
||||||
|
|
||||||
|
@ -574,7 +574,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
|
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
|
||||||
per_cpu(drv_data, cpu) = data;
|
per_cpu(acfreq_data, cpu) = data;
|
||||||
|
|
||||||
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
|
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
|
||||||
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
|
acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
|
||||||
|
@ -725,20 +725,20 @@ err_unreg:
|
||||||
acpi_processor_unregister_performance(perf, cpu);
|
acpi_processor_unregister_performance(perf, cpu);
|
||||||
err_free:
|
err_free:
|
||||||
kfree(data);
|
kfree(data);
|
||||||
per_cpu(drv_data, cpu) = NULL;
|
per_cpu(acfreq_data, cpu) = NULL;
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
|
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
|
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
|
||||||
|
|
||||||
dprintk("acpi_cpufreq_cpu_exit\n");
|
dprintk("acpi_cpufreq_cpu_exit\n");
|
||||||
|
|
||||||
if (data) {
|
if (data) {
|
||||||
cpufreq_frequency_table_put_attr(policy->cpu);
|
cpufreq_frequency_table_put_attr(policy->cpu);
|
||||||
per_cpu(drv_data, policy->cpu) = NULL;
|
per_cpu(acfreq_data, policy->cpu) = NULL;
|
||||||
acpi_processor_unregister_performance(data->acpi_data,
|
acpi_processor_unregister_performance(data->acpi_data,
|
||||||
policy->cpu);
|
policy->cpu);
|
||||||
kfree(data);
|
kfree(data);
|
||||||
|
@ -749,7 +749,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
|
||||||
|
|
||||||
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
|
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
|
||||||
{
|
{
|
||||||
struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
|
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
|
||||||
|
|
||||||
dprintk("acpi_cpufreq_resume\n");
|
dprintk("acpi_cpufreq_resume\n");
|
||||||
|
|
||||||
|
|
|
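The acpi-cpufreq hunks above only rename the file-local per-cpu variables with an acfreq_ prefix so the symbols stay unique; the access pattern itself is unchanged. A generic sketch of that declare/lookup shape, using an invented foo_ driver:

#include <linux/percpu.h>

struct foo_state {				/* invented, for illustration only */
	unsigned int cur_freq;
};

static DEFINE_PER_CPU(struct foo_state *, foo_data);	/* driver-prefixed name */

static void foo_attach(unsigned int cpu, struct foo_state *s)
{
	per_cpu(foo_data, cpu) = s;
}

static struct foo_state *foo_lookup(unsigned int cpu)
{
	return per_cpu(foo_data, cpu);		/* NULL until foo_attach() ran */
}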
@ -499,8 +499,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
||||||
#ifdef CONFIG_SYSFS
|
#ifdef CONFIG_SYSFS
|
||||||
|
|
||||||
/* pointer to _cpuid4_info array (for each cache leaf) */
|
/* pointer to _cpuid4_info array (for each cache leaf) */
|
||||||
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
|
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
|
||||||
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
|
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
||||||
|
@ -513,7 +513,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
||||||
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
|
if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
|
||||||
struct cpuinfo_x86 *d;
|
struct cpuinfo_x86 *d;
|
||||||
for_each_online_cpu(i) {
|
for_each_online_cpu(i) {
|
||||||
if (!per_cpu(cpuid4_info, i))
|
if (!per_cpu(ici_cpuid4_info, i))
|
||||||
continue;
|
continue;
|
||||||
d = &cpu_data(i);
|
d = &cpu_data(i);
|
||||||
this_leaf = CPUID4_INFO_IDX(i, index);
|
this_leaf = CPUID4_INFO_IDX(i, index);
|
||||||
|
@ -535,7 +535,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
|
||||||
c->apicid >> index_msb) {
|
c->apicid >> index_msb) {
|
||||||
cpumask_set_cpu(i,
|
cpumask_set_cpu(i,
|
||||||
to_cpumask(this_leaf->shared_cpu_map));
|
to_cpumask(this_leaf->shared_cpu_map));
|
||||||
if (i != cpu && per_cpu(cpuid4_info, i)) {
|
if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
|
||||||
sibling_leaf =
|
sibling_leaf =
|
||||||
CPUID4_INFO_IDX(i, index);
|
CPUID4_INFO_IDX(i, index);
|
||||||
cpumask_set_cpu(cpu, to_cpumask(
|
cpumask_set_cpu(cpu, to_cpumask(
|
||||||
|
@ -574,8 +574,8 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
|
||||||
for (i = 0; i < num_cache_leaves; i++)
|
for (i = 0; i < num_cache_leaves; i++)
|
||||||
cache_remove_shared_cpu_map(cpu, i);
|
cache_remove_shared_cpu_map(cpu, i);
|
||||||
|
|
||||||
kfree(per_cpu(cpuid4_info, cpu));
|
kfree(per_cpu(ici_cpuid4_info, cpu));
|
||||||
per_cpu(cpuid4_info, cpu) = NULL;
|
per_cpu(ici_cpuid4_info, cpu) = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
|
@ -614,15 +614,15 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
|
||||||
if (num_cache_leaves == 0)
|
if (num_cache_leaves == 0)
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
|
||||||
per_cpu(cpuid4_info, cpu) = kzalloc(
|
per_cpu(ici_cpuid4_info, cpu) = kzalloc(
|
||||||
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
|
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
|
||||||
if (per_cpu(cpuid4_info, cpu) == NULL)
|
if (per_cpu(ici_cpuid4_info, cpu) == NULL)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
|
smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
|
||||||
if (retval) {
|
if (retval) {
|
||||||
kfree(per_cpu(cpuid4_info, cpu));
|
kfree(per_cpu(ici_cpuid4_info, cpu));
|
||||||
per_cpu(cpuid4_info, cpu) = NULL;
|
per_cpu(ici_cpuid4_info, cpu) = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
return retval;
|
return retval;
|
||||||
|
@ -634,7 +634,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
|
||||||
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
|
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
|
||||||
|
|
||||||
/* pointer to kobject for cpuX/cache */
|
/* pointer to kobject for cpuX/cache */
|
||||||
static DEFINE_PER_CPU(struct kobject *, cache_kobject);
|
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
|
||||||
|
|
||||||
struct _index_kobject {
|
struct _index_kobject {
|
||||||
struct kobject kobj;
|
struct kobject kobj;
|
||||||
|
@ -643,8 +643,8 @@ struct _index_kobject {
|
||||||
};
|
};
|
||||||
|
|
||||||
/* pointer to array of kobjects for cpuX/cache/indexY */
|
/* pointer to array of kobjects for cpuX/cache/indexY */
|
||||||
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
|
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
|
||||||
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
|
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
|
||||||
|
|
||||||
#define show_one_plus(file_name, object, val) \
|
#define show_one_plus(file_name, object, val) \
|
||||||
static ssize_t show_##file_name \
|
static ssize_t show_##file_name \
|
||||||
|
@ -863,10 +863,10 @@ static struct kobj_type ktype_percpu_entry = {
|
||||||
|
|
||||||
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
|
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
|
||||||
{
|
{
|
||||||
kfree(per_cpu(cache_kobject, cpu));
|
kfree(per_cpu(ici_cache_kobject, cpu));
|
||||||
kfree(per_cpu(index_kobject, cpu));
|
kfree(per_cpu(ici_index_kobject, cpu));
|
||||||
per_cpu(cache_kobject, cpu) = NULL;
|
per_cpu(ici_cache_kobject, cpu) = NULL;
|
||||||
per_cpu(index_kobject, cpu) = NULL;
|
per_cpu(ici_index_kobject, cpu) = NULL;
|
||||||
free_cache_attributes(cpu);
|
free_cache_attributes(cpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -882,14 +882,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
|
||||||
return err;
|
return err;
|
||||||
|
|
||||||
/* Allocate all required memory */
|
/* Allocate all required memory */
|
||||||
per_cpu(cache_kobject, cpu) =
|
per_cpu(ici_cache_kobject, cpu) =
|
||||||
kzalloc(sizeof(struct kobject), GFP_KERNEL);
|
kzalloc(sizeof(struct kobject), GFP_KERNEL);
|
||||||
if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
|
if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
|
||||||
goto err_out;
|
goto err_out;
|
||||||
|
|
||||||
per_cpu(index_kobject, cpu) = kzalloc(
|
per_cpu(ici_index_kobject, cpu) = kzalloc(
|
||||||
sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
|
sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
|
||||||
if (unlikely(per_cpu(index_kobject, cpu) == NULL))
|
if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
|
||||||
goto err_out;
|
goto err_out;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -913,7 +913,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
|
||||||
if (unlikely(retval < 0))
|
if (unlikely(retval < 0))
|
||||||
return retval;
|
return retval;
|
||||||
|
|
||||||
retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
|
retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
|
||||||
&ktype_percpu_entry,
|
&ktype_percpu_entry,
|
||||||
&sys_dev->kobj, "%s", "cache");
|
&sys_dev->kobj, "%s", "cache");
|
||||||
if (retval < 0) {
|
if (retval < 0) {
|
||||||
|
@ -927,12 +927,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
|
||||||
this_object->index = i;
|
this_object->index = i;
|
||||||
retval = kobject_init_and_add(&(this_object->kobj),
|
retval = kobject_init_and_add(&(this_object->kobj),
|
||||||
&ktype_cache,
|
&ktype_cache,
|
||||||
per_cpu(cache_kobject, cpu),
|
per_cpu(ici_cache_kobject, cpu),
|
||||||
"index%1lu", i);
|
"index%1lu", i);
|
||||||
if (unlikely(retval)) {
|
if (unlikely(retval)) {
|
||||||
for (j = 0; j < i; j++)
|
for (j = 0; j < i; j++)
|
||||||
kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
|
kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
|
||||||
kobject_put(per_cpu(cache_kobject, cpu));
|
kobject_put(per_cpu(ici_cache_kobject, cpu));
|
||||||
cpuid4_cache_sysfs_exit(cpu);
|
cpuid4_cache_sysfs_exit(cpu);
|
||||||
return retval;
|
return retval;
|
||||||
}
|
}
|
||||||
|
@ -940,7 +940,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
|
||||||
}
|
}
|
||||||
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
|
cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
|
||||||
|
|
||||||
kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
|
kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -949,7 +949,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
|
||||||
unsigned int cpu = sys_dev->id;
|
unsigned int cpu = sys_dev->id;
|
||||||
unsigned long i;
|
unsigned long i;
|
||||||
|
|
||||||
if (per_cpu(cpuid4_info, cpu) == NULL)
|
if (per_cpu(ici_cpuid4_info, cpu) == NULL)
|
||||||
return;
|
return;
|
||||||
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
|
if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
|
||||||
return;
|
return;
|
||||||
|
@ -957,7 +957,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
|
||||||
|
|
||||||
for (i = 0; i < num_cache_leaves; i++)
|
for (i = 0; i < num_cache_leaves; i++)
|
||||||
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
|
kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
|
||||||
kobject_put(per_cpu(cache_kobject, cpu));
|
kobject_put(per_cpu(ici_cache_kobject, cpu));
|
||||||
cpuid4_cache_sysfs_exit(cpu);
|
cpuid4_cache_sysfs_exit(cpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -265,13 +265,13 @@ struct ds_context {
|
||||||
int cpu;
|
int cpu;
|
||||||
};
|
};
|
||||||
|
|
||||||
static DEFINE_PER_CPU(struct ds_context *, cpu_context);
|
static DEFINE_PER_CPU(struct ds_context *, cpu_ds_context);
|
||||||
|
|
||||||
|
|
||||||
static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
|
static struct ds_context *ds_get_context(struct task_struct *task, int cpu)
|
||||||
{
|
{
|
||||||
struct ds_context **p_context =
|
struct ds_context **p_context =
|
||||||
(task ? &task->thread.ds_ctx : &per_cpu(cpu_context, cpu));
|
(task ? &task->thread.ds_ctx : &per_cpu(cpu_ds_context, cpu));
|
||||||
struct ds_context *context = NULL;
|
struct ds_context *context = NULL;
|
||||||
struct ds_context *new_context = NULL;
|
struct ds_context *new_context = NULL;
|
||||||
|
|
||||||
|
|
|
@ -316,7 +316,7 @@ static void svm_hardware_disable(void *garbage)
|
||||||
static int svm_hardware_enable(void *garbage)
|
static int svm_hardware_enable(void *garbage)
|
||||||
{
|
{
|
||||||
|
|
||||||
struct svm_cpu_data *svm_data;
|
struct svm_cpu_data *sd;
|
||||||
uint64_t efer;
|
uint64_t efer;
|
||||||
struct descriptor_table gdt_descr;
|
struct descriptor_table gdt_descr;
|
||||||
struct desc_struct *gdt;
|
struct desc_struct *gdt;
|
||||||
|
@ -331,63 +331,61 @@ static int svm_hardware_enable(void *garbage)
|
||||||
me);
|
me);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
svm_data = per_cpu(svm_data, me);
|
sd = per_cpu(svm_data, me);
|
||||||
|
|
||||||
if (!svm_data) {
|
if (!sd) {
|
||||||
printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
|
printk(KERN_ERR "svm_hardware_enable: svm_data is NULL on %d\n",
|
||||||
me);
|
me);
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
svm_data->asid_generation = 1;
|
sd->asid_generation = 1;
|
||||||
svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
|
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
|
||||||
svm_data->next_asid = svm_data->max_asid + 1;
|
sd->next_asid = sd->max_asid + 1;
|
||||||
|
|
||||||
kvm_get_gdt(&gdt_descr);
|
kvm_get_gdt(&gdt_descr);
|
||||||
gdt = (struct desc_struct *)gdt_descr.base;
|
gdt = (struct desc_struct *)gdt_descr.base;
|
||||||
svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
|
sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
|
||||||
|
|
||||||
wrmsrl(MSR_EFER, efer | EFER_SVME);
|
wrmsrl(MSR_EFER, efer | EFER_SVME);
|
||||||
|
|
||||||
wrmsrl(MSR_VM_HSAVE_PA,
|
wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
|
||||||
page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void svm_cpu_uninit(int cpu)
|
static void svm_cpu_uninit(int cpu)
|
||||||
{
|
{
|
||||||
struct svm_cpu_data *svm_data
|
struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
|
||||||
= per_cpu(svm_data, raw_smp_processor_id());
|
|
||||||
|
|
||||||
if (!svm_data)
|
if (!sd)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
|
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
|
||||||
__free_page(svm_data->save_area);
|
__free_page(sd->save_area);
|
||||||
kfree(svm_data);
|
kfree(sd);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int svm_cpu_init(int cpu)
|
static int svm_cpu_init(int cpu)
|
||||||
{
|
{
|
||||||
struct svm_cpu_data *svm_data;
|
struct svm_cpu_data *sd;
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
|
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
|
||||||
if (!svm_data)
|
if (!sd)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
svm_data->cpu = cpu;
|
sd->cpu = cpu;
|
||||||
svm_data->save_area = alloc_page(GFP_KERNEL);
|
sd->save_area = alloc_page(GFP_KERNEL);
|
||||||
r = -ENOMEM;
|
r = -ENOMEM;
|
||||||
if (!svm_data->save_area)
|
if (!sd->save_area)
|
||||||
goto err_1;
|
goto err_1;
|
||||||
|
|
||||||
per_cpu(svm_data, cpu) = svm_data;
|
per_cpu(svm_data, cpu) = sd;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_1:
|
err_1:
|
||||||
kfree(svm_data);
|
kfree(sd);
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1092,16 +1090,16 @@ static void save_host_msrs(struct kvm_vcpu *vcpu)
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
|
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
|
||||||
{
|
{
|
||||||
if (svm_data->next_asid > svm_data->max_asid) {
|
if (sd->next_asid > sd->max_asid) {
|
||||||
++svm_data->asid_generation;
|
++sd->asid_generation;
|
||||||
svm_data->next_asid = 1;
|
sd->next_asid = 1;
|
||||||
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
|
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
|
||||||
}
|
}
|
||||||
|
|
||||||
svm->asid_generation = svm_data->asid_generation;
|
svm->asid_generation = sd->asid_generation;
|
||||||
svm->vmcb->control.asid = svm_data->next_asid++;
|
svm->vmcb->control.asid = sd->next_asid++;
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
|
static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
|
||||||
|
@ -2429,8 +2427,8 @@ static void reload_tss(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
int cpu = raw_smp_processor_id();
|
int cpu = raw_smp_processor_id();
|
||||||
|
|
||||||
struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
|
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
|
||||||
svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
|
sd->tss_desc->type = 9; /* available 32/64-bit TSS */
|
||||||
load_TR_desc();
|
load_TR_desc();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2438,12 +2436,12 @@ static void pre_svm_run(struct vcpu_svm *svm)
|
||||||
{
|
{
|
||||||
int cpu = raw_smp_processor_id();
|
int cpu = raw_smp_processor_id();
|
||||||
|
|
||||||
struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
|
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
|
||||||
|
|
||||||
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
|
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
|
||||||
/* FIXME: handle wraparound of asid_generation */
|
/* FIXME: handle wraparound of asid_generation */
|
||||||
if (svm->asid_generation != svm_data->asid_generation)
|
if (svm->asid_generation != sd->asid_generation)
|
||||||
new_asid(svm, svm_data);
|
new_asid(svm, sd);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
|
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
|
||||||
|
|
|
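In the svm.c hunks above the only change is that the function-local pointer is renamed from svm_data to sd, so the local identifier no longer spells the same name as the per-cpu variable it is loaded from; the logic is untouched. The resulting access shape, kept deliberately small (demo_bump_asid is an invented function):

static void demo_bump_asid(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	if (!sd)
		return;
	++sd->asid_generation;		/* per-CPU state, touched through the local */
}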
@ -35,10 +35,10 @@
|
||||||
|
|
||||||
cpumask_var_t xen_cpu_initialized_map;
|
cpumask_var_t xen_cpu_initialized_map;
|
||||||
|
|
||||||
static DEFINE_PER_CPU(int, resched_irq);
|
static DEFINE_PER_CPU(int, xen_resched_irq);
|
||||||
static DEFINE_PER_CPU(int, callfunc_irq);
|
static DEFINE_PER_CPU(int, xen_callfunc_irq);
|
||||||
static DEFINE_PER_CPU(int, callfuncsingle_irq);
|
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
|
||||||
static DEFINE_PER_CPU(int, debug_irq) = -1;
|
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
|
||||||
|
|
||||||
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
|
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
|
||||||
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
|
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
|
||||||
|
@ -103,7 +103,7 @@ static int xen_smp_intr_init(unsigned int cpu)
|
||||||
NULL);
|
NULL);
|
||||||
if (rc < 0)
|
if (rc < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
per_cpu(resched_irq, cpu) = rc;
|
per_cpu(xen_resched_irq, cpu) = rc;
|
||||||
|
|
||||||
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
|
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
|
||||||
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
|
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
|
||||||
|
@ -114,7 +114,7 @@ static int xen_smp_intr_init(unsigned int cpu)
|
||||||
NULL);
|
NULL);
|
||||||
if (rc < 0)
|
if (rc < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
per_cpu(callfunc_irq, cpu) = rc;
|
per_cpu(xen_callfunc_irq, cpu) = rc;
|
||||||
|
|
||||||
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
|
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
|
||||||
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
|
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
|
||||||
|
@ -122,7 +122,7 @@ static int xen_smp_intr_init(unsigned int cpu)
|
||||||
debug_name, NULL);
|
debug_name, NULL);
|
||||||
if (rc < 0)
|
if (rc < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
per_cpu(debug_irq, cpu) = rc;
|
per_cpu(xen_debug_irq, cpu) = rc;
|
||||||
|
|
||||||
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
|
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
|
||||||
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
|
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
|
||||||
|
@ -133,19 +133,20 @@ static int xen_smp_intr_init(unsigned int cpu)
|
||||||
NULL);
|
NULL);
|
||||||
if (rc < 0)
|
if (rc < 0)
|
||||||
goto fail;
|
goto fail;
|
||||||
per_cpu(callfuncsingle_irq, cpu) = rc;
|
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
fail:
|
fail:
|
||||||
if (per_cpu(resched_irq, cpu) >= 0)
|
if (per_cpu(xen_resched_irq, cpu) >= 0)
|
||||||
unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
|
||||||
if (per_cpu(callfunc_irq, cpu) >= 0)
|
if (per_cpu(xen_callfunc_irq, cpu) >= 0)
|
||||||
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
|
||||||
if (per_cpu(debug_irq, cpu) >= 0)
|
if (per_cpu(xen_debug_irq, cpu) >= 0)
|
||||||
unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
|
||||||
if (per_cpu(callfuncsingle_irq, cpu) >= 0)
|
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
|
||||||
unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
|
||||||
|
NULL);
|
||||||
|
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
@ -349,10 +350,10 @@ static void xen_cpu_die(unsigned int cpu)
|
||||||
current->state = TASK_UNINTERRUPTIBLE;
|
current->state = TASK_UNINTERRUPTIBLE;
|
||||||
schedule_timeout(HZ/10);
|
schedule_timeout(HZ/10);
|
||||||
}
|
}
|
||||||
unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
|
||||||
unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
|
||||||
unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
|
||||||
unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
|
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
|
||||||
xen_uninit_lock_cpu(cpu);
|
xen_uninit_lock_cpu(cpu);
|
||||||
xen_teardown_timer(cpu);
|
xen_teardown_timer(cpu);
|
||||||
|
|
||||||
|
|
|
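One pattern worth noting in the xen_smp_intr_init() hunks above: each bound IRQ number is remembered in a per-cpu int so the fail: path can unwind exactly what was already bound. A compact sketch of that bind/unwind bookkeeping with invented names; demo_bind_source() is a hypothetical stand-in for the real bind_*_to_irqhandler() calls:

#include <linux/percpu.h>
#include <xen/events.h>

static DEFINE_PER_CPU(int, demo_irq) = -1;	/* -1 means "never bound" */

int demo_bind_source(unsigned int cpu);		/* hypothetical helper */

static int demo_intr_init(unsigned int cpu)
{
	int rc = demo_bind_source(cpu);

	if (rc < 0)
		return rc;
	per_cpu(demo_irq, cpu) = rc;
	return 0;
}

static void demo_intr_fini(unsigned int cpu)
{
	if (per_cpu(demo_irq, cpu) >= 0) {
		unbind_from_irqhandler(per_cpu(demo_irq, cpu), NULL);
		per_cpu(demo_irq, cpu) = -1;
	}
}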
@ -31,14 +31,14 @@
|
||||||
#define NS_PER_TICK (1000000000LL / HZ)
|
#define NS_PER_TICK (1000000000LL / HZ)
|
||||||
|
|
||||||
/* runstate info updated by Xen */
|
/* runstate info updated by Xen */
|
||||||
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
|
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
|
||||||
|
|
||||||
/* snapshots of runstate info */
|
/* snapshots of runstate info */
|
||||||
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);
|
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
|
||||||
|
|
||||||
/* unused ns of stolen and blocked time */
|
/* unused ns of stolen and blocked time */
|
||||||
static DEFINE_PER_CPU(u64, residual_stolen);
|
static DEFINE_PER_CPU(u64, xen_residual_stolen);
|
||||||
static DEFINE_PER_CPU(u64, residual_blocked);
|
static DEFINE_PER_CPU(u64, xen_residual_blocked);
|
||||||
|
|
||||||
/* return a consistent snapshot of 64-bit time/counter value */
|
/* return a consistent snapshot of 64-bit time/counter value */
|
||||||
static u64 get64(const u64 *p)
|
static u64 get64(const u64 *p)
|
||||||
|
@ -79,7 +79,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
|
||||||
|
|
||||||
BUG_ON(preemptible());
|
BUG_ON(preemptible());
|
||||||
|
|
||||||
state = &__get_cpu_var(runstate);
|
state = &__get_cpu_var(xen_runstate);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The runstate info is always updated by the hypervisor on
|
* The runstate info is always updated by the hypervisor on
|
||||||
|
@ -97,14 +97,14 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
|
||||||
/* return true when a vcpu could run but has no real cpu to run on */
|
/* return true when a vcpu could run but has no real cpu to run on */
|
||||||
bool xen_vcpu_stolen(int vcpu)
|
bool xen_vcpu_stolen(int vcpu)
|
||||||
{
|
{
|
||||||
return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
|
return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
|
||||||
}
|
}
|
||||||
|
|
||||||
void xen_setup_runstate_info(int cpu)
|
void xen_setup_runstate_info(int cpu)
|
||||||
{
|
{
|
||||||
struct vcpu_register_runstate_memory_area area;
|
struct vcpu_register_runstate_memory_area area;
|
||||||
|
|
||||||
area.addr.v = &per_cpu(runstate, cpu);
|
area.addr.v = &per_cpu(xen_runstate, cpu);
|
||||||
|
|
||||||
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
|
if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
|
||||||
cpu, &area))
|
cpu, &area))
|
||||||
|
@ -122,7 +122,7 @@ static void do_stolen_accounting(void)
|
||||||
|
|
||||||
WARN_ON(state.state != RUNSTATE_running);
|
WARN_ON(state.state != RUNSTATE_running);
|
||||||
|
|
||||||
snap = &__get_cpu_var(runstate_snapshot);
|
snap = &__get_cpu_var(xen_runstate_snapshot);
|
||||||
|
|
||||||
/* work out how much time the VCPU has not been runn*ing* */
|
/* work out how much time the VCPU has not been runn*ing* */
|
||||||
blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
|
blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
|
||||||
|
@ -133,24 +133,24 @@ static void do_stolen_accounting(void)
|
||||||
|
|
||||||
/* Add the appropriate number of ticks of stolen time,
|
/* Add the appropriate number of ticks of stolen time,
|
||||||
including any left-overs from last time. */
|
including any left-overs from last time. */
|
||||||
stolen = runnable + offline + __get_cpu_var(residual_stolen);
|
stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
|
||||||
|
|
||||||
if (stolen < 0)
|
if (stolen < 0)
|
||||||
stolen = 0;
|
stolen = 0;
|
||||||
|
|
||||||
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
|
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
|
||||||
__get_cpu_var(residual_stolen) = stolen;
|
__get_cpu_var(xen_residual_stolen) = stolen;
|
||||||
account_steal_ticks(ticks);
|
account_steal_ticks(ticks);
|
||||||
|
|
||||||
/* Add the appropriate number of ticks of blocked time,
|
/* Add the appropriate number of ticks of blocked time,
|
||||||
including any left-overs from last time. */
|
including any left-overs from last time. */
|
||||||
blocked += __get_cpu_var(residual_blocked);
|
blocked += __get_cpu_var(xen_residual_blocked);
|
||||||
|
|
||||||
if (blocked < 0)
|
if (blocked < 0)
|
||||||
blocked = 0;
|
blocked = 0;
|
||||||
|
|
||||||
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
|
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
|
||||||
__get_cpu_var(residual_blocked) = blocked;
|
__get_cpu_var(xen_residual_blocked) = blocked;
|
||||||
account_idle_ticks(ticks);
|
account_idle_ticks(ticks);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
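The do_stolen_accounting() hunks above keep per-cpu residual nanosecond counters (now xen_residual_stolen / xen_residual_blocked) so that sub-tick amounts are carried over instead of being dropped. A standalone sketch of that accumulate-divide-carry step, with invented names and the same iter_div_u64_rem() helper:

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/percpu.h>

#define DEMO_NS_PER_TICK	(1000000000LL / HZ)

static DEFINE_PER_CPU(u64, demo_residual_ns);	/* invented name */

/* Convert a newly observed chunk of lost time into whole ticks,
 * carrying the sub-tick remainder over to the next call. */
static unsigned long demo_ns_to_ticks(s64 delta_ns)
{
	u64 ns, ticks;

	if (delta_ns < 0)
		delta_ns = 0;
	ns = delta_ns + __get_cpu_var(demo_residual_ns);
	ticks = iter_div_u64_rem(ns, DEMO_NS_PER_TICK, &ns);
	__get_cpu_var(demo_residual_ns) = ns;
	return ticks;
}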
@ -99,7 +99,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
|
||||||
struct cryptd_cpu_queue *cpu_queue;
|
struct cryptd_cpu_queue *cpu_queue;
|
||||||
|
|
||||||
cpu = get_cpu();
|
cpu = get_cpu();
|
||||||
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
|
cpu_queue = this_cpu_ptr(queue->cpu_queue);
|
||||||
err = crypto_enqueue_request(&cpu_queue->queue, request);
|
err = crypto_enqueue_request(&cpu_queue->queue, request);
|
||||||
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
|
queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
|
||||||
put_cpu();
|
put_cpu();
|
||||||
|
|
|
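The cryptd change above is purely notational: inside the get_cpu()/put_cpu() section, this_cpu_ptr(p) yields the same address that per_cpu_ptr(p, cpu) did, just without naming the CPU. A minimal sketch of the same idiom (struct demo_queue and demo_enqueue are invented):

#include <linux/percpu.h>
#include <linux/preempt.h>

struct demo_queue {
	unsigned int pending;
};

/* "queues" is assumed to come from alloc_percpu(struct demo_queue). */
static void demo_enqueue(struct demo_queue *queues)
{
	struct demo_queue *q;

	preempt_disable();
	q = this_cpu_ptr(queues);	/* == per_cpu_ptr(queues, smp_processor_id()) */
	q->pending++;
	preempt_enable();
}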
@ -131,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
|
||||||
* boot up and this data does not change thereafter. Hence this
|
* boot up and this data does not change thereafter. Hence this
|
||||||
* operation should be safe. No locking required.
|
* operation should be safe. No locking required.
|
||||||
*/
|
*/
|
||||||
addr = __pa(per_cpu_ptr(crash_notes, cpunum));
|
addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
|
||||||
rc = sprintf(buf, "%Lx\n", addr);
|
rc = sprintf(buf, "%Lx\n", addr);
|
||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
|
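The show_crash_notes() hunk above swaps __pa() for per_cpu_ptr_to_phys(), presumably because __pa() is only valid for addresses in the kernel linear mapping, while per-cpu memory may be mapped through the vmalloc area depending on how the first chunk was set up; per_cpu_ptr_to_phys() covers both cases. Sketch with an invented per-cpu variable:

#include <linux/types.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u32, demo_note);		/* invented */

static phys_addr_t demo_note_phys(unsigned int cpu)
{
	return per_cpu_ptr_to_phys(&per_cpu(demo_note, cpu));
}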
@ -64,14 +64,14 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
|
||||||
* - Lock should not be held across
|
* - Lock should not be held across
|
||||||
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
|
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
|
||||||
*/
|
*/
|
||||||
static DEFINE_PER_CPU(int, policy_cpu);
|
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
|
||||||
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
|
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
|
||||||
|
|
||||||
#define lock_policy_rwsem(mode, cpu) \
|
#define lock_policy_rwsem(mode, cpu) \
|
||||||
int lock_policy_rwsem_##mode \
|
int lock_policy_rwsem_##mode \
|
||||||
(int cpu) \
|
(int cpu) \
|
||||||
{ \
|
{ \
|
||||||
int policy_cpu = per_cpu(policy_cpu, cpu); \
|
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
|
||||||
BUG_ON(policy_cpu == -1); \
|
BUG_ON(policy_cpu == -1); \
|
||||||
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
|
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
|
||||||
if (unlikely(!cpu_online(cpu))) { \
|
if (unlikely(!cpu_online(cpu))) { \
|
||||||
|
@ -90,7 +90,7 @@ EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
|
||||||
|
|
||||||
void unlock_policy_rwsem_read(int cpu)
|
void unlock_policy_rwsem_read(int cpu)
|
||||||
{
|
{
|
||||||
int policy_cpu = per_cpu(policy_cpu, cpu);
|
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
|
||||||
BUG_ON(policy_cpu == -1);
|
BUG_ON(policy_cpu == -1);
|
||||||
up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
|
up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
|
||||||
}
|
}
|
||||||
|
@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
|
||||||
|
|
||||||
void unlock_policy_rwsem_write(int cpu)
|
void unlock_policy_rwsem_write(int cpu)
|
||||||
{
|
{
|
||||||
int policy_cpu = per_cpu(policy_cpu, cpu);
|
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
|
||||||
BUG_ON(policy_cpu == -1);
|
BUG_ON(policy_cpu == -1);
|
||||||
up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
|
up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
|
||||||
}
|
}
|
||||||
|
@ -818,7 +818,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
|
||||||
|
|
||||||
/* Set proper policy_cpu */
|
/* Set proper policy_cpu */
|
||||||
unlock_policy_rwsem_write(cpu);
|
unlock_policy_rwsem_write(cpu);
|
||||||
per_cpu(policy_cpu, cpu) = managed_policy->cpu;
|
per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
|
||||||
|
|
||||||
if (lock_policy_rwsem_write(cpu) < 0) {
|
if (lock_policy_rwsem_write(cpu) < 0) {
|
||||||
/* Should not go through policy unlock path */
|
/* Should not go through policy unlock path */
|
||||||
|
@ -932,7 +932,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
|
||||||
if (!cpu_online(j))
|
if (!cpu_online(j))
|
||||||
continue;
|
continue;
|
||||||
per_cpu(cpufreq_cpu_data, j) = policy;
|
per_cpu(cpufreq_cpu_data, j) = policy;
|
||||||
per_cpu(policy_cpu, j) = policy->cpu;
|
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
||||||
|
|
||||||
|
@ -1020,7 +1020,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
|
||||||
cpumask_copy(policy->cpus, cpumask_of(cpu));
|
cpumask_copy(policy->cpus, cpumask_of(cpu));
|
||||||
|
|
||||||
/* Initially set CPU itself as the policy_cpu */
|
/* Initially set CPU itself as the policy_cpu */
|
||||||
per_cpu(policy_cpu, cpu) = cpu;
|
per_cpu(cpufreq_policy_cpu, cpu) = cpu;
|
||||||
ret = (lock_policy_rwsem_write(cpu) < 0);
|
ret = (lock_policy_rwsem_write(cpu) < 0);
|
||||||
WARN_ON(ret);
|
WARN_ON(ret);
|
||||||
|
|
||||||
|
@ -2002,7 +2002,7 @@ static int __init cpufreq_core_init(void)
|
||||||
int cpu;
|
int cpu;
|
||||||
|
|
||||||
for_each_possible_cpu(cpu) {
|
for_each_possible_cpu(cpu) {
|
||||||
per_cpu(policy_cpu, cpu) = -1;
|
per_cpu(cpufreq_policy_cpu, cpu) = -1;
|
||||||
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
|
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
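For readability, roughly what the lock_policy_rwsem(mode, cpu) template above stamps out for the read case, hand-expanded inside cpufreq.c; the tail after the cpu_online() check is elided in the hunk and is only summarised in the comment below:

int lock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);

	BUG_ON(policy_cpu == -1);
	down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
	if (unlikely(!cpu_online(cpu))) {
		/* elided in the hunk: drop the rwsem and report failure */
		up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
		return -1;
	}

	return 0;
}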
@ -174,7 +174,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
|
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
|
||||||
|
|
||||||
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
|
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
|
||||||
/**
|
/**
|
||||||
* show_available_freqs - show available frequencies for the specified CPU
|
* show_available_freqs - show available frequencies for the specified CPU
|
||||||
*/
|
*/
|
||||||
|
@ -185,10 +185,10 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf)
|
||||||
ssize_t count = 0;
|
ssize_t count = 0;
|
||||||
struct cpufreq_frequency_table *table;
|
struct cpufreq_frequency_table *table;
|
||||||
|
|
||||||
if (!per_cpu(show_table, cpu))
|
if (!per_cpu(cpufreq_show_table, cpu))
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
table = per_cpu(show_table, cpu);
|
table = per_cpu(cpufreq_show_table, cpu);
|
||||||
|
|
||||||
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
|
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
|
||||||
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
|
if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
|
||||||
|
@ -217,20 +217,20 @@ void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
|
||||||
unsigned int cpu)
|
unsigned int cpu)
|
||||||
{
|
{
|
||||||
dprintk("setting show_table for cpu %u to %p\n", cpu, table);
|
dprintk("setting show_table for cpu %u to %p\n", cpu, table);
|
||||||
per_cpu(show_table, cpu) = table;
|
per_cpu(cpufreq_show_table, cpu) = table;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
|
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
|
||||||
|
|
||||||
void cpufreq_frequency_table_put_attr(unsigned int cpu)
|
void cpufreq_frequency_table_put_attr(unsigned int cpu)
|
||||||
{
|
{
|
||||||
dprintk("clearing show_table for cpu %u\n", cpu);
|
dprintk("clearing show_table for cpu %u\n", cpu);
|
||||||
per_cpu(show_table, cpu) = NULL;
|
per_cpu(cpufreq_show_table, cpu) = NULL;
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
|
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
|
||||||
|
|
||||||
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
|
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
|
||||||
{
|
{
|
||||||
return per_cpu(show_table, cpu);
|
return per_cpu(cpufreq_show_table, cpu);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
|
EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
|
||||||
|
|
||||||
|
|
|
@@ -64,7 +64,7 @@ struct aes_ctx {
	u32 *D;
};

-static DEFINE_PER_CPU(struct cword *, last_cword);
+static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable to generate
   the extended key for a given key_len. */

@@ -152,9 +152,9 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,

ok:
	for_each_online_cpu(cpu)
-		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
-		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
-			per_cpu(last_cword, cpu) = NULL;
+		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
+		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
+			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

@@ -166,7 +166,7 @@ static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

-	if (cword != per_cpu(last_cword, cpu))
+	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else

@@ -176,7 +176,7 @@ static inline void padlock_reset_key(struct cword *cword)

static inline void padlock_store_cword(struct cword *cword)
{
-	per_cpu(last_cword, raw_smp_processor_id()) = cword;
+	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
@@ -326,14 +326,7 @@ arch_initcall(dma_channel_table_init);
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
-	struct dma_chan *chan;
-	int cpu;
-
-	cpu = get_cpu();
-	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
-	put_cpu();
-
-	return chan;
+	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

@@ -857,7 +850,6 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
-	int cpu;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);

@@ -876,10 +868,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();

	return cookie;
}

@@ -906,7 +898,6 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
-	int cpu;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);

@@ -923,10 +914,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();

	return cookie;
}

@@ -955,7 +946,6 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
-	int cpu;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);

@@ -973,10 +963,10 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

-	cpu = get_cpu();
-	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
-	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
-	put_cpu();
+	preempt_disable();
+	__this_cpu_add(chan->local->bytes_transferred, len);
+	__this_cpu_inc(chan->local->memcpy_count);
+	preempt_enable();

	return cookie;
}
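The DMA-engine hunks above show the conversion pattern used throughout this merge: an open-coded get_cpu()/per_cpu_ptr()/put_cpu() sequence becomes either a single this_cpu_read() or a preempt_disable() section around __this_cpu_add()/__this_cpu_inc(). A minimal sketch of that shape, with an invented counter structure (struct my_stats and my_stats_pcpu are illustrative, not from the patch):

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	struct my_stats {
		unsigned long bytes;
		unsigned long packets;
	};

	/* assumed to come from alloc_percpu(struct my_stats) at init time */
	static struct my_stats *my_stats_pcpu;

	static void my_account(unsigned long len)
	{
		/* old style: cpu = get_cpu(); per_cpu_ptr(my_stats_pcpu, cpu)->bytes += len; put_cpu(); */
		preempt_disable();
		__this_cpu_add(my_stats_pcpu->bytes, len);
		__this_cpu_inc(my_stats_pcpu->packets);
		preempt_enable();
	}

The __this_cpu variants are legal here only because preemption is explicitly held off around the two updates, which is exactly the structure the converted dma_async_memcpy_* paths use.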
@@ -826,8 +826,7 @@ static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu)
		cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);

		list_del(&cq->entry);
-		__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
-						  smp_processor_id()));
+		__queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks));
	}

	spin_unlock_irqrestore(&cct->task_lock, flags_cct);
@@ -69,7 +69,7 @@ static struct lguest_pages *lguest_pages(unsigned int cpu)
		  (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]);
}

-static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
+static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);

/*S:010
 * We approach the Switcher.

@@ -90,8 +90,8 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
	 * meanwhile). If that's not the case, we pretend everything in the
	 * Guest has changed.
	 */
-	if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
-		__get_cpu_var(last_cpu) = cpu;
+	if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+		__get_cpu_var(lg_last_cpu) = cpu;
		cpu->last_pages = pages;
		cpu->changed = CHANGED_ALL;
	}
@@ -1378,7 +1378,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
	}
	__skb_pull(skb, sizeof(*p));

-	st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+	st = this_cpu_ptr(sge->port_stats[p->iff]);

	skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&

@@ -1780,8 +1780,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
-	struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
-						smp_processor_id());
+	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;
@@ -81,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,

	/* it's OK to use per_cpu_ptr() because BHs are off */
	pcpu_lstats = dev->ml_priv;
-	lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
+	lb_stats = this_cpu_ptr(pcpu_lstats);

	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
@@ -153,15 +153,14 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
-	int length, cpu;
+	int length;

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

-	cpu = smp_processor_id();
-	stats = per_cpu_ptr(priv->stats, cpu);
-	rcv_stats = per_cpu_ptr(rcv_priv->stats, cpu);
+	stats = this_cpu_ptr(priv->stats);
+	rcv_stats = this_cpu_ptr(rcv_priv->stats);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;
@@ -47,7 +47,7 @@
 */
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
-DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

@@ -61,8 +61,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
-	struct oprofile_cpu_buffer *cpu_buf
-		= &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

@@ -95,7 +94,7 @@ int alloc_cpu_buffers(void)
		goto fail;

	for_each_possible_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;

@@ -122,7 +121,7 @@ void start_cpu_work(void)
	work_enabled = 1;

	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they dont all

@@ -139,7 +138,7 @@ void end_cpu_work(void)
	work_enabled = 0;

	for_each_online_cpu(i) {
-		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
+		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

@@ -330,7 +329,7 @@ static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*

@@ -375,7 +374,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_received++;

@@ -430,13 +429,13 @@ int oprofile_write_commit(struct op_entry *entry)

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
-	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
+	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

@@ -50,7 +50,7 @@ struct oprofile_cpu_buffer {
	struct delayed_work work;
};

-DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

/*
 * Resets the cpu buffer to a sane state.

@@ -60,7 +60,7 @@ DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 */
static inline void op_cpu_buffer_reset(int cpu)
{
-	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu);

	cpu_buf->last_is_kernel = -1;
	cpu_buf->last_task = NULL;

@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
	int i;

	for_each_possible_cpu(i) {
-		cpu_buf = &per_cpu(cpu_buffer, i);
+		cpu_buf = &per_cpu(op_cpu_buffer, i);
		cpu_buf->sample_received = 0;
		cpu_buf->sample_lost_overflow = 0;
		cpu_buf->backtrace_aborted = 0;

@@ -51,7 +51,7 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
		return;

	for_each_possible_cpu(i) {
-		cpu_buf = &per_cpu(cpu_buffer, i);
+		cpu_buf = &per_cpu(op_cpu_buffer, i);
		snprintf(buf, 10, "cpu%d", i);
		cpudir = oprofilefs_mkdir(sb, dir, buf);
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
-			char* iucv_dbf_txt_buf = \
-					get_cpu_var(iucv_dbf_txt_buf); \
-			sprintf(iucv_dbf_txt_buf, text); \
-			debug_text_event(iucv_dbf_##name, level, \
-						iucv_dbf_txt_buf); \
+			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+			sprintf(__buf, text); \
+			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
@@ -3955,7 +3955,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
	 * per cpu locality group is to reduce the contention between block
	 * request from multiple CPUs.
	 */
-	ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
+	ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);

	/* we're going to use group allocation */
	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
@@ -25,13 +25,7 @@ struct nfs_iostats {
static inline void nfs_inc_server_stats(const struct nfs_server *server,
					enum nfs_stat_eventcounters stat)
{
-	struct nfs_iostats *iostats;
-	int cpu;
-
-	cpu = get_cpu();
-	iostats = per_cpu_ptr(server->io_stats, cpu);
-	iostats->events[stat]++;
-	put_cpu();
+	this_cpu_inc(server->io_stats->events[stat]);
}

static inline void nfs_inc_stats(const struct inode *inode,

@@ -44,13 +38,7 @@ static inline void nfs_add_server_stats(const struct nfs_server *server,
					enum nfs_stat_bytecounters stat,
					unsigned long addend)
{
-	struct nfs_iostats *iostats;
-	int cpu;
-
-	cpu = get_cpu();
-	iostats = per_cpu_ptr(server->io_stats, cpu);
-	iostats->bytes[stat] += addend;
-	put_cpu();
+	this_cpu_add(server->io_stats->bytes[stat], addend);
}

static inline void nfs_add_stats(const struct inode *inode,

@@ -65,13 +53,7 @@ static inline void nfs_add_fscache_stats(struct inode *inode,
					 enum nfs_stat_fscachecounters stat,
					 unsigned long addend)
{
-	struct nfs_iostats *iostats;
-	int cpu;
-
-	cpu = get_cpu();
-	iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
-	iostats->fscache[stat] += addend;
-	put_cpu();
+	this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
}
#endif
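The NFS hunks above rely on this_cpu_inc()/this_cpu_add() accepting any lvalue reached through a pointer returned by alloc_percpu(), so the whole get_cpu()/per_cpu_ptr()/put_cpu() dance collapses into one preemption-safe statement. A hedged sketch with an invented counter layout (struct io_counters and io_stats are not from the patch):

	#include <linux/percpu.h>

	struct io_counters {
		unsigned long long events[4];
	};

	/* assumed: io_stats = alloc_percpu(struct io_counters); */
	static struct io_counters *io_stats;

	static inline void count_event(int which)
	{
		/* one preemption-safe read-modify-write on this CPU's copy */
		this_cpu_inc(io_stats->events[which]);
	}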
@@ -2389,12 +2389,12 @@ xfs_icsb_modify_counters(
{
	xfs_icsb_cnts_t *icsbp;
	long long lcounter;	/* long counter for 64 bit fields */
-	int cpu, ret = 0;
+	int ret = 0;

	might_sleep();
again:
-	cpu = get_cpu();
-	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
+	preempt_disable();
+	icsbp = this_cpu_ptr(mp->m_sb_cnts);

	/*
	 * if the counter is disabled, go to slow path

@@ -2438,11 +2438,11 @@ again:
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
+	preempt_enable();
	return 0;

slow_path:
-	put_cpu();
+	preempt_enable();

	/*
	 * serialise with a mutex so we don't burn lots of cpu on

@@ -2490,7 +2490,7 @@ slow_path:

balance_counter:
	xfs_icsb_unlock_cntr(icsbp);
-	put_cpu();
+	preempt_enable();

	/*
	 * We may have multiple threads here if multiple per-cpu
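The XFS hunk swaps get_cpu()/put_cpu() for preempt_disable()/preempt_enable() around this_cpu_ptr(). That is an equal exchange: get_cpu() is preempt_disable() followed by smp_processor_id(), and the CPU number is no longer needed once this_cpu_ptr() selects the local slot. A sketch of the resulting shape, with invented types (struct cnts and cnts_pcpu are illustrative):

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	struct cnts { long long free; };
	static struct cnts *cnts_pcpu;		/* from alloc_percpu(struct cnts) */

	static void modify_local(long long delta)
	{
		struct cnts *c;

		preempt_disable();		/* was: cpu = get_cpu(); */
		c = this_cpu_ptr(cnts_pcpu);	/* was: per_cpu_ptr(cnts_pcpu, cpu) */
		c->free += delta;
		preempt_enable();		/* was: put_cpu(); */
	}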
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#define __raw_get_cpu_var(var) \
	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))

+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);

@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
#define per_cpu(var, cpu)	(*((void)(cpu), &per_cpu_var(var)))
#define __get_cpu_var(var)	per_cpu_var(var)
#define __raw_get_cpu_var(var)	per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)

#endif /* SMP */
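These two helpers give a pointer into the running CPU's copy without ever naming the CPU: on SMP the pointer is shifted by the local per-CPU offset, and on UP they degenerate to per_cpu_ptr(ptr, 0). Roughly, with preemption disabled, this_cpu_ptr(p) names the same slot as per_cpu_ptr(p, smp_processor_id()). A small sketch under that assumption (the counters pointer is hypothetical):

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	/* assumed: counters = alloc_percpu(unsigned long); */
	static unsigned long *counters;

	static void bump_local_counter(void)
	{
		unsigned long *p;

		preempt_disable();
		/* equivalent here to per_cpu_ptr(counters, smp_processor_id()) */
		p = this_cpu_ptr(counters);
		(*p)++;
		preempt_enable();
	}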
@@ -60,6 +60,7 @@

#define DEFINE_PER_CPU_SECTION(type, name, sec) \
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
	__typeof__(type) per_cpu__##name
@@ -34,8 +34,6 @@

#ifdef CONFIG_SMP

-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE	PFN_ALIGN(64 << 10)

@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

extern void *__alloc_reserved_percpu(size_t size, size_t align);

-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
-	void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);

@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
	kfree(p);
}

+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	return __pa(addr);
+}
+
static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)

@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)

#endif /* CONFIG_SMP */

-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
-						       __alignof__(type))
+#define alloc_percpu(type) \
+	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
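The new alloc_percpu() cast goes through typeof(type) * instead of (type *), which is what lets it accept array types: a cast to typeof(int[4]) * is a valid pointer-to-array cast, while "(int[4] *)" would not even parse. A hedged sketch of what that enables (names and the error handling are invented, not from the patch):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/percpu.h>

	/* per-CPU array of four ints; the type is int (*)[4] */
	static int (*pcpu_counts)[4];

	static int __init counts_init(void)
	{
		pcpu_counts = alloc_percpu(int[4]);
		if (!pcpu_counts)
			return -ENOMEM;
		return 0;
	}

The UP fallback of per_cpu_ptr_to_phys() added just above is simply __pa() on the address, since without SMP the "per-CPU" storage is ordinary kernel memory.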
@@ -243,4 +225,404 @@
# define percpu_xor(var, val)	__percpu_generic_to_op(var, (val), ^=)
#endif

+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable) \
+({	typeof(variable) pscr_ret__; \
+	switch(sizeof(variable)) { \
+	case 1: pscr_ret__ = stem##1(variable);break; \
+	case 2: pscr_ret__ = stem##2(variable);break; \
+	case 4: pscr_ret__ = stem##4(variable);break; \
+	case 8: pscr_ret__ = stem##8(variable);break; \
+	default: \
+		__bad_size_call_parameter();break; \
+	} \
+	pscr_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+	switch(sizeof(variable)) { \
+	case 1: stem##1(variable, __VA_ARGS__);break; \
+	case 2: stem##2(variable, __VA_ARGS__);break; \
+	case 4: stem##4(variable, __VA_ARGS__);break; \
+	case 8: stem##8(variable, __VA_ARGS__);break; \
+	default: \
+		__bad_size_call_parameter();break; \
+	} \
+} while (0)
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx).
+ *
+ * These operation guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely. F.e. define this_cpu_add().
+ *    The arch must then ensure that the various scalar format passed
+ *    are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes. F.e. provide
+ *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ *    sized RMW actions. If arch code does not provide operations for
+ *    a scalar size then the fallback in the generic code will be
+ *    used.
+ */
+
+#define _this_cpu_generic_read(pcp) \
+({	typeof(pcp) ret__; \
+	preempt_disable(); \
+	ret__ = *this_cpu_ptr(&(pcp)); \
+	preempt_enable(); \
+	ret__; \
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op) \
+do { \
+	preempt_disable(); \
+	*__this_cpu_ptr(&pcp) op val; \
+	preempt_enable(); \
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
+
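The this_cpu_* family above is the preempt-safe group: each generic fallback brackets the read-modify-write with preempt_disable()/preempt_enable(), and an architecture can replace any of the size variants with a single-instruction version. A hedged usage sketch with an invented counter; at this point in the tree a statically defined per-CPU variable is still passed through per_cpu_var(), as the vmstat and rcutorture hunks later in this diff do:

	#include <linux/cpumask.h>
	#include <linux/percpu.h>

	/* 'nr_widgets' is illustrative, not a real kernel counter */
	static DEFINE_PER_CPU(unsigned long, nr_widgets);

	static void widget_created(void)
	{
		/* safe even if this task can be preempted or migrated */
		this_cpu_inc(per_cpu_var(nr_widgets));
	}

	static unsigned long widgets_total(void)
	{
		unsigned long sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(nr_widgets, cpu);
		return sum;
	}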
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemptions issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op) \
+do { \
+	*__this_cpu_ptr(&(pcp)) op val; \
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
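The __this_cpu_* group drops the implicit preempt_disable(), so it is only correct where the caller already holds off preemption or interrupts, which is how the DMA-engine and XFS hunks earlier in this diff use it. A small sketch under that assumption (the two counters are invented):

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(unsigned long, tx_bytes);
	static DEFINE_PER_CPU(unsigned long, tx_packets);

	static void account_tx(unsigned long len)
	{
		/* caller-visible guarantee: no preemption between the two updates */
		preempt_disable();
		__this_cpu_add(per_cpu_var(tx_bytes), len);
		__this_cpu_inc(per_cpu_var(tx_packets));
		preempt_enable();
	}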
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from another
+ * processors (which one gets when using regular atomic operations)
+ * . They are guaranteed to be atomic vs. local interrupts and
+ * preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op) \
+do { \
+	unsigned long flags; \
+	local_irq_save(flags); \
+	*__this_cpu_ptr(&(pcp)) op val; \
+	local_irq_restore(flags); \
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (val))
+#endif
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (val))
+#endif
+
#endif /* __LINUX_PERCPU_H */
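The irqsafe_cpu_* group wraps the update in local_irq_save()/local_irq_restore(), so a counter touched from both process context and an interrupt handler on the same CPU stays consistent without a full atomic (it is still not safe against another CPU, as the comment above says). An illustrative sketch, with an invented counter:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, irq_seen);

	/* also incremented from an interrupt handler on this CPU */
	static void note_irq_from_task(void)
	{
		irqsafe_cpu_inc(per_cpu_var(irq_seen));
	}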
@@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
-	__get_cpu_var(vm_event_states).event[item]++;
+	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
-	get_cpu_var(vm_event_states).event[item]++;
-	put_cpu();
+	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
-	__get_cpu_var(vm_event_states).event[item] += delta;
+	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
-	get_cpu_var(vm_event_states).event[item] += delta;
-	put_cpu();
+	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
}

extern void all_vm_events(unsigned long *);
@@ -88,12 +88,7 @@ struct neigh_statistics {
	unsigned long unres_discards;	/* number of unresolved drops */
};

-#define NEIGH_CACHE_STAT_INC(tbl, field) \
-	do { \
-		preempt_disable(); \
-		(per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
-		preempt_enable(); \
-	} while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)

struct neighbour {
	struct neighbour *next;
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;

#define NF_CT_STAT_INC(net, count) \
-	(per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+	__this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) \
do { \
	local_bh_disable(); \
-	per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++; \
+	__this_cpu_inc((net)->ct.stat->count); \
	local_bh_enable(); \
} while (0)
@@ -137,44 +137,30 @@ struct linux_xfrm_mib {
#define SNMP_STAT_USRPTR(name)	(name[1])

#define SNMP_INC_STATS_BH(mib, field) \
-	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
+	__this_cpu_inc(mib[0]->mibs[field])
#define SNMP_INC_STATS_USER(mib, field) \
-	do { \
-		per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
-		put_cpu(); \
-	} while (0)
+	this_cpu_inc(mib[1]->mibs[field])
#define SNMP_INC_STATS(mib, field) \
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
-		put_cpu(); \
-	} while (0)
+	this_cpu_inc(mib[!in_softirq()]->mibs[field])
#define SNMP_DEC_STATS(mib, field) \
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_ADD_STATS(mib, field, addend) \
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
-		put_cpu(); \
-	} while (0)
+	this_cpu_dec(mib[!in_softirq()]->mibs[field])
#define SNMP_ADD_STATS_BH(mib, field, addend) \
-	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
+	__this_cpu_add(mib[0]->mibs[field], addend)
#define SNMP_ADD_STATS_USER(mib, field, addend) \
-	do { \
-		per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
-		put_cpu(); \
-	} while (0)
+	this_cpu_add(mib[1]->mibs[field], addend)
#define SNMP_UPD_PO_STATS(mib, basefield, addend) \
	do { \
-		__typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+		__typeof__(mib[0]) ptr; \
+		preempt_disable(); \
+		ptr = this_cpu_ptr((mib)[!in_softirq()]); \
		ptr->mibs[basefield##PKTS]++; \
		ptr->mibs[basefield##OCTETS] += addend;\
-		put_cpu(); \
+		preempt_enable(); \
	} while (0)
#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
	do { \
-		__typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+		__typeof__(mib[0]) ptr = \
+			__this_cpu_ptr((mib)[!in_softirq()]); \
		ptr->mibs[basefield##PKTS]++; \
		ptr->mibs[basefield##OCTETS] += addend;\
	} while (0)
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
}

#ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+		      cpu_lock_stats);

static inline u64 lockstat_clock(void)
{

@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
-			&per_cpu(lock_stats, cpu)[class - lock_classes];
+			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
-			&per_cpu(lock_stats, cpu)[class - lock_classes];
+			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}

@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
-	return &get_cpu_var(lock_stats)[class - lock_classes];
+	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
-	put_cpu_var(lock_stats);
+	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
kernel/module.c (150 lines changed)
@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
|
|
||||||
#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
|
|
||||||
|
|
||||||
static void *percpu_modalloc(unsigned long size, unsigned long align,
|
static void *percpu_modalloc(unsigned long size, unsigned long align,
|
||||||
const char *name)
|
const char *name)
|
||||||
{
|
{
|
||||||
|
@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
|
||||||
free_percpu(freeme);
|
free_percpu(freeme);
|
||||||
}
|
}
|
||||||
|
|
||||||
#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
|
|
||||||
|
|
||||||
/* Number of blocks used and allocated. */
|
|
||||||
static unsigned int pcpu_num_used, pcpu_num_allocated;
|
|
||||||
/* Size of each block. -ve means used. */
|
|
||||||
static int *pcpu_size;
|
|
||||||
|
|
||||||
static int split_block(unsigned int i, unsigned short size)
|
|
||||||
{
|
|
||||||
/* Reallocation required? */
|
|
||||||
if (pcpu_num_used + 1 > pcpu_num_allocated) {
|
|
||||||
int *new;
|
|
||||||
|
|
||||||
new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
|
|
||||||
GFP_KERNEL);
|
|
||||||
if (!new)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
pcpu_num_allocated *= 2;
|
|
||||||
pcpu_size = new;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Insert a new subblock */
|
|
||||||
memmove(&pcpu_size[i+1], &pcpu_size[i],
|
|
||||||
sizeof(pcpu_size[0]) * (pcpu_num_used - i));
|
|
||||||
pcpu_num_used++;
|
|
||||||
|
|
||||||
pcpu_size[i+1] -= size;
|
|
||||||
pcpu_size[i] = size;
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline unsigned int block_size(int val)
|
|
||||||
{
|
|
||||||
if (val < 0)
|
|
||||||
return -val;
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void *percpu_modalloc(unsigned long size, unsigned long align,
|
|
||||||
const char *name)
|
|
||||||
{
|
|
||||||
unsigned long extra;
|
|
||||||
unsigned int i;
|
|
||||||
void *ptr;
|
|
||||||
int cpu;
|
|
||||||
|
|
||||||
-    if (align > PAGE_SIZE) {
-        printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-               name, align, PAGE_SIZE);
-        align = PAGE_SIZE;
-    }
-
-    ptr = __per_cpu_start;
-    for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-        /* Extra for alignment requirement. */
-        extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
-        BUG_ON(i == 0 && extra != 0);
-
-        if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
-            continue;
-
-        /* Transfer extra to previous block. */
-        if (pcpu_size[i-1] < 0)
-            pcpu_size[i-1] -= extra;
-        else
-            pcpu_size[i-1] += extra;
-        pcpu_size[i] -= extra;
-        ptr += extra;
-
-        /* Split block if warranted */
-        if (pcpu_size[i] - size > sizeof(unsigned long))
-            if (!split_block(i, size))
-                return NULL;
-
-        /* add the per-cpu scanning areas */
-        for_each_possible_cpu(cpu)
-            kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
-                           GFP_KERNEL);
-
-        /* Mark allocated */
-        pcpu_size[i] = -pcpu_size[i];
-        return ptr;
-    }
-
-    printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
-           size);
-    return NULL;
-}
-
-static void percpu_modfree(void *freeme)
-{
-    unsigned int i;
-    void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
-    int cpu;
-
-    /* First entry is core kernel percpu data. */
-    for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-        if (ptr == freeme) {
-            pcpu_size[i] = -pcpu_size[i];
-            goto free;
-        }
-    }
-    BUG();
-
- free:
-    /* remove the per-cpu scanning areas */
-    for_each_possible_cpu(cpu)
-        kmemleak_free(freeme + per_cpu_offset(cpu));
-
-    /* Merge with previous? */
-    if (pcpu_size[i-1] >= 0) {
-        pcpu_size[i-1] += pcpu_size[i];
-        pcpu_num_used--;
-        memmove(&pcpu_size[i], &pcpu_size[i+1],
-                (pcpu_num_used - i) * sizeof(pcpu_size[0]));
-        i--;
-    }
-    /* Merge with next? */
-    if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
-        pcpu_size[i] += pcpu_size[i+1];
-        pcpu_num_used--;
-        memmove(&pcpu_size[i+1], &pcpu_size[i+2],
-                (pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
-    }
-}
-
-static int percpu_modinit(void)
-{
-    pcpu_num_used = 2;
-    pcpu_num_allocated = 2;
-    pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
-                        GFP_KERNEL);
-    /* Static in-kernel percpu data (used). */
-    pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
-    /* Free room. */
-    pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
-    if (pcpu_size[1] < 0) {
-        printk(KERN_ERR "No per-cpu room for modules.\n");
-        pcpu_num_used = 1;
-    }
-
-    return 0;
-}
-__initcall(percpu_modinit);
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
                                  Elf_Shdr *sechdrs,
                                  const char *secstrings)
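
The allocator deleted above keeps the whole module percpu area as one flat list of blocks: pcpu_size[i] holds a block length, a negative value marks that block as allocated, allocation is a first-fit scan that flips the sign, and freeing flips it back and merges free neighbours. A minimal user-space sketch of just that bookkeeping, with made-up sizes and helper names (illustrative only, not kernel code):

#include <stdio.h>

/* Toy version of the removed bookkeeping: negative size = allocated block. */
static long blocks[8] = { -64, 128, 32 };   /* entry 0: static percpu data */
static int nblocks = 3;

static long block_size(long v) { return v < 0 ? -v : v; }

/* First-fit: find a free block big enough, mark it allocated, return index. */
static int toy_alloc(long size)
{
    int i;

    for (i = 1; i < nblocks; i++) {
        if (blocks[i] < 0 || blocks[i] < size)
            continue;               /* already allocated, or too small */
        blocks[i] = -blocks[i];     /* sign flip marks it allocated */
        return i;
    }
    return -1;
}

static void toy_free(int i)
{
    blocks[i] = -blocks[i];         /* back to "free" */
    /* the real percpu_modfree() also merged adjacent free blocks here */
}

int main(void)
{
    int i = toy_alloc(100);

    printf("got block %d of size %ld\n", i, block_size(blocks[i]));
    toy_free(i);
    return 0;
}

The real code additionally splits an oversized free block (split_block()) and hands alignment padding back to the previous block, as the removed percpu_modalloc() above shows.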
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
         /* Should not happen, but... */
         pipe_count = RCU_TORTURE_PIPE_LEN;
     }
-    ++__get_cpu_var(rcu_torture_count)[pipe_count];
+    __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
     completed = cur_ops->completed() - completed;
     if (completed > RCU_TORTURE_PIPE_LEN) {
         /* Should not happen, but... */
         completed = RCU_TORTURE_PIPE_LEN;
     }
-    ++__get_cpu_var(rcu_torture_batch)[completed];
+    __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
     preempt_enable();
     cur_ops->readunlock(idx);
 }

@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
         /* Should not happen, but... */
         pipe_count = RCU_TORTURE_PIPE_LEN;
     }
-    ++__get_cpu_var(rcu_torture_count)[pipe_count];
+    __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
     completed = cur_ops->completed() - completed;
     if (completed > RCU_TORTURE_PIPE_LEN) {
         /* Should not happen, but... */
         completed = RCU_TORTURE_PIPE_LEN;
     }
-    ++__get_cpu_var(rcu_torture_batch)[completed];
+    __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
     preempt_enable();
     cur_ops->readunlock(idx);
     schedule();
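
The two RCU torture-test hunks above are typical of the this_cpu conversions in this merge: the open-coded ++__get_cpu_var(var)[idx], which first forms the address of this CPU's copy and then increments through it, becomes a single __this_cpu_inc(), which an architecture can map to one percpu-addressed instruction. A kernel-style sketch of the same before/after on a hypothetical counter array (the name torture_hits is invented; as at the call sites above, callers are assumed to run with preemption already disabled):

#include <linux/percpu.h>

#define NSTAGES 10

static DEFINE_PER_CPU(long [NSTAGES], torture_hits);

static void note_hit_old(int stage)
{
    /* old style: take this CPU's copy, then do an ordinary increment */
    ++__get_cpu_var(torture_hits)[stage];
}

static void note_hit_new(int stage)
{
    /* new style: one per-cpu increment operation */
    __this_cpu_inc(per_cpu_var(torture_hits)[stage]);
}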
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);

 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group

@@ -8286,14 +8286,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);

 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
                  struct sched_group **sg, struct cpumask *unused)
 {
     if (sg)
-        *sg = &per_cpu(sched_group_cpus, cpu).sg;
+        *sg = &per_cpu(sched_groups, cpu).sg;
     return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */

@@ -9583,7 +9583,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
         init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
         init_tg_rt_entry(&init_task_group,
-                         &per_cpu(init_rt_rq, i),
+                         &per_cpu(init_rt_rq_var, i),
                          &per_cpu(init_sched_rt_entity, i), i, 1,
                          root_task_group.rt_se[i]);
 #endif
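
The renames in these scheduler hunks (init_rt_rq becoming init_rt_rq_var, sched_group_cpus becoming sched_groups) are purely about keeping per-cpu symbol names unique: once the per_cpu__ name prefix goes away, a per-cpu variable shares the global namespace with ordinary symbols, so generic names have to be disambiguated. The access pattern itself does not change; a short kernel-style sketch with an invented name (runq_stats_pcpu):

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct runq_stats {
    unsigned long switches;
};

/* pick a name that will not clash with any other global symbol */
static DEFINE_PER_CPU(struct runq_stats, runq_stats_pcpu);

static unsigned long total_switches(void)
{
    unsigned long sum = 0;
    int cpu;

    /* per_cpu(var, cpu) names another CPU's copy explicitly */
    for_each_possible_cpu(cpu)
        sum += per_cpu(runq_stats_pcpu, cpu).switches;
    return sum;
}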
@@ -697,7 +697,7 @@ void __init softirq_init(void)
     open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }

-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
 {
     set_current_state(TASK_INTERRUPTIBLE);

@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
     switch (action) {
     case CPU_UP_PREPARE:
     case CPU_UP_PREPARE_FROZEN:
-        p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+        p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
         if (IS_ERR(p)) {
             printk("ksoftirqd for %i failed\n", hotcpu);
             return NOTIFY_BAD;
@@ -22,9 +22,9 @@

 static DEFINE_SPINLOCK(print_lock);

-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);

 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;

@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
 {
     int this_cpu = raw_smp_processor_id();

-    __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+    __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
 }

 void touch_softlockup_watchdog(void)
 {
-    __raw_get_cpu_var(touch_timestamp) = 0;
+    __raw_get_cpu_var(softlockup_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);

@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)

     /* Cause each CPU to re-update its timestamp rather than complain */
     for_each_online_cpu(cpu)
-        per_cpu(touch_timestamp, cpu) = 0;
+        per_cpu(softlockup_touch_ts, cpu) = 0;
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);

@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 void softlockup_tick(void)
 {
     int this_cpu = smp_processor_id();
-    unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-    unsigned long print_timestamp;
+    unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+    unsigned long print_ts;
     struct pt_regs *regs = get_irq_regs();
     unsigned long now;

     /* Is detection switched off? */
-    if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+    if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
         /* Be sure we don't false trigger if switched back on */
-        if (touch_timestamp)
-            per_cpu(touch_timestamp, this_cpu) = 0;
+        if (touch_ts)
+            per_cpu(softlockup_touch_ts, this_cpu) = 0;
         return;
     }

-    if (touch_timestamp == 0) {
+    if (touch_ts == 0) {
         __touch_softlockup_watchdog();
         return;
     }

-    print_timestamp = per_cpu(print_timestamp, this_cpu);
+    print_ts = per_cpu(softlockup_print_ts, this_cpu);

     /* report at most once a second */
-    if (print_timestamp == touch_timestamp || did_panic)
+    if (print_ts == touch_ts || did_panic)
         return;

     /* do not print during early bootup: */

@@ -140,18 +140,18 @@ void softlockup_tick(void)
      * Wake up the high-prio watchdog task twice per
      * threshold timespan.
      */
-    if (now > touch_timestamp + softlockup_thresh/2)
-        wake_up_process(per_cpu(watchdog_task, this_cpu));
+    if (now > touch_ts + softlockup_thresh/2)
+        wake_up_process(per_cpu(softlockup_watchdog, this_cpu));

     /* Warn about unreasonable delays: */
-    if (now <= (touch_timestamp + softlockup_thresh))
+    if (now <= (touch_ts + softlockup_thresh))
         return;

-    per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+    per_cpu(softlockup_print_ts, this_cpu) = touch_ts;

     spin_lock(&print_lock);
     printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
-           this_cpu, now - touch_timestamp,
+           this_cpu, now - touch_ts,
            current->comm, task_pid_nr(current));
     print_modules();
     print_irqtrace_events(current);

@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
     switch (action) {
     case CPU_UP_PREPARE:
     case CPU_UP_PREPARE_FROZEN:
-        BUG_ON(per_cpu(watchdog_task, hotcpu));
+        BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
         p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
         if (IS_ERR(p)) {
             printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
             return NOTIFY_BAD;
         }
-        per_cpu(touch_timestamp, hotcpu) = 0;
-        per_cpu(watchdog_task, hotcpu) = p;
+        per_cpu(softlockup_touch_ts, hotcpu) = 0;
+        per_cpu(softlockup_watchdog, hotcpu) = p;
         kthread_bind(p, hotcpu);
         break;
     case CPU_ONLINE:
     case CPU_ONLINE_FROZEN:
-        wake_up_process(per_cpu(watchdog_task, hotcpu));
+        wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
         break;
 #ifdef CONFIG_HOTPLUG_CPU
     case CPU_UP_CANCELED:
     case CPU_UP_CANCELED_FROZEN:
-        if (!per_cpu(watchdog_task, hotcpu))
+        if (!per_cpu(softlockup_watchdog, hotcpu))
             break;
         /* Unbind so it can run. Fall thru. */
-        kthread_bind(per_cpu(watchdog_task, hotcpu),
+        kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
                      cpumask_any(cpu_online_mask));
     case CPU_DEAD:
     case CPU_DEAD_FROZEN:
-        p = per_cpu(watchdog_task, hotcpu);
-        per_cpu(watchdog_task, hotcpu) = NULL;
+        p = per_cpu(softlockup_watchdog, hotcpu);
+        per_cpu(softlockup_watchdog, hotcpu) = NULL;
         kthread_stop(p);
         break;
 #endif /* CONFIG_HOTPLUG_CPU */
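
Beyond the renames, the soft-lockup hunks show the usual lifecycle of a per-CPU kthread driven from a CPU-hotplug notifier: the task pointer lives in a per-cpu variable, the thread is created and bound on CPU_UP_PREPARE, woken on CPU_ONLINE and stopped on CPU_DEAD. A trimmed kernel-style sketch of that shape (the names my_watchdog, my_thread_fn and the "mywd" thread name are invented; the cancel/frozen cases handled above are elided here):

#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, my_watchdog);

static int my_thread_fn(void *data)
{
    /* park until woken; a real watchdog would do its checks here */
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;
}

static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
                                     unsigned long action, void *hcpu)
{
    int hotcpu = (unsigned long)hcpu;
    struct task_struct *p;

    switch (action) {
    case CPU_UP_PREPARE:
        p = kthread_create(my_thread_fn, hcpu, "mywd/%d", hotcpu);
        if (IS_ERR(p))
            return NOTIFY_BAD;
        kthread_bind(p, hotcpu);        /* run only on that CPU */
        per_cpu(my_watchdog, hotcpu) = p;
        break;
    case CPU_ONLINE:
        wake_up_process(per_cpu(my_watchdog, hotcpu));
        break;
    case CPU_DEAD:
        p = per_cpu(my_watchdog, hotcpu);
        per_cpu(my_watchdog, hotcpu) = NULL;
        kthread_stop(p);
        break;
    }
    return NOTIFY_OK;
}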
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
 /*
  * Per-CPU lookup locks for fast hash lookup:
  */
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock);

 /*
  * Mutex to serialize state changes with show-stats activities:

@@ -245,7 +245,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
     if (likely(!timer_stats_active))
         return;

-    lock = &per_cpu(lookup_lock, raw_smp_processor_id());
+    lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());

     input.timer = timer;
     input.start_func = startf;

@@ -348,9 +348,10 @@ static void sync_access(void)
     int cpu;

     for_each_online_cpu(cpu) {
-        spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+        spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
+
+        spin_lock_irqsave(lock, flags);
         /* nothing */
-        spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+        spin_unlock_irqrestore(lock, flags);
     }
 }

@@ -408,7 +409,7 @@ void __init init_timer_stats(void)
     int cpu;

     for_each_possible_cpu(cpu)
-        spin_lock_init(&per_cpu(lookup_lock, cpu));
+        spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
 }

 static int __init init_tstats_procfs(void)
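
The sync_access() change is a small pattern worth copying: compute &per_cpu(...) once into a local pointer instead of re-evaluating the per-cpu lookup separately for the lock and the unlock. A sketch with an invented per-cpu lock (stats_lock), assuming it is initialised for every possible CPU at boot the way init_timer_stats() does above:

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

/* assumed to be spin_lock_init()'d per possible CPU during init */
static DEFINE_PER_CPU(spinlock_t, stats_lock);

static void quiesce_all_cpus(void)
{
    unsigned long flags;
    int cpu;

    for_each_online_cpu(cpu) {
        spinlock_t *lock = &per_cpu(stats_lock, cpu);

        /* one lookup, reused for both lock and unlock */
        spin_lock_irqsave(lock, flags);
        /* nothing - just drain anyone currently holding the lock */
        spin_unlock_irqrestore(lock, flags);
    }
}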
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;

-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);

 static inline void ftrace_disable_cpu(void)
 {
     preempt_disable();
-    local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+    __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }

 static inline void ftrace_enable_cpu(void)
 {
-    local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+    __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
     preempt_enable();
 }

@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array max_tr;

-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;

@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
     struct ftrace_entry *entry;

     /* If we are reading the ring buffer, don't trace */
-    if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+    if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
         return;

     event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),

@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
     /* Allocate the first page for all buffers */
     for_each_tracing_cpu(i) {
         global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-        max_tr.data[i] = &per_cpu(max_data, i);
+        max_tr.data[i] = &per_cpu(max_tr_data, i);
     }

     trace_init_cmdlines();
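
The ftrace_cpu_disabled conversion also changes the per-cpu variable from a local_t to a plain int: with __this_cpu_inc()/__this_cpu_dec() the read-modify-write is already issued as a single per-cpu operation where the architecture supports it, so the local_t wrapper and its local_inc()/local_read() accessors are no longer needed. A kernel-style sketch of the resulting guard pattern on an invented flag (my_trace_disabled):

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, my_trace_disabled);

static inline void my_trace_disable_cpu(void)
{
    preempt_disable();
    __this_cpu_inc(per_cpu_var(my_trace_disabled));
}

static inline void my_trace_enable_cpu(void)
{
    __this_cpu_dec(per_cpu_var(my_trace_disabled));
    preempt_enable();
}

static void my_trace_event(void)
{
    /* cheap per-cpu read decides whether recording is currently off */
    if (unlikely(__this_cpu_read(per_cpu_var(my_trace_disabled))))
        return;
    /* ... record the event ... */
}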
@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);

 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);

 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
     struct ring_buffer *buffer = tr->buffer;
     struct ftrace_graph_ent_entry *entry;

-    if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+    if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
         return 0;

     event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,

@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
     struct ring_buffer *buffer = tr->buffer;
     struct ftrace_graph_ret_entry *entry;

-    if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+    if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
         return;

     event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -20,10 +20,10 @@

 #define BTS_BUFFER_SIZE (1 << 13)

-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);

-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())

 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;

@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;

 static void bts_trace_init_cpu(int cpu)
 {
-    per_cpu(tracer, cpu) =
-        ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-                           NULL, (size_t)-1, BTS_KERNEL);
+    per_cpu(hwb_tracer, cpu) =
+        ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+                           BTS_BUFFER_SIZE, NULL, (size_t)-1,
+                           BTS_KERNEL);

-    if (IS_ERR(per_cpu(tracer, cpu)))
-        per_cpu(tracer, cpu) = NULL;
+    if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+        per_cpu(hwb_tracer, cpu) = NULL;
 }

 static int bts_trace_init(struct trace_array *tr)

@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
     for_each_online_cpu(cpu) {
         bts_trace_init_cpu(cpu);

-        if (likely(per_cpu(tracer, cpu)))
+        if (likely(per_cpu(hwb_tracer, cpu)))
             trace_hw_branches_enabled = 1;
     }
     trace_hw_branches_suspended = 0;

@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)

     get_online_cpus();
     for_each_online_cpu(cpu) {
-        if (likely(per_cpu(tracer, cpu))) {
-            ds_release_bts(per_cpu(tracer, cpu));
-            per_cpu(tracer, cpu) = NULL;
+        if (likely(per_cpu(hwb_tracer, cpu))) {
+            ds_release_bts(per_cpu(hwb_tracer, cpu));
+            per_cpu(hwb_tracer, cpu) = NULL;
         }
     }
     trace_hw_branches_enabled = 0;

@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)

     get_online_cpus();
     for_each_online_cpu(cpu)
-        if (likely(per_cpu(tracer, cpu)))
-            ds_resume_bts(per_cpu(tracer, cpu));
+        if (likely(per_cpu(hwb_tracer, cpu)))
+            ds_resume_bts(per_cpu(hwb_tracer, cpu));
     trace_hw_branches_suspended = 0;
     put_online_cpus();
 }

@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)

     get_online_cpus();
     for_each_online_cpu(cpu)
-        if (likely(per_cpu(tracer, cpu)))
-            ds_suspend_bts(per_cpu(tracer, cpu));
+        if (likely(per_cpu(hwb_tracer, cpu)))
+            ds_suspend_bts(per_cpu(hwb_tracer, cpu));
     trace_hw_branches_suspended = 1;
     put_online_cpus();
 }

@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
         bts_trace_init_cpu(cpu);

         if (trace_hw_branches_suspended &&
-            likely(per_cpu(tracer, cpu)))
-            ds_suspend_bts(per_cpu(tracer, cpu));
+            likely(per_cpu(hwb_tracer, cpu)))
+            ds_suspend_bts(per_cpu(hwb_tracer, cpu));
     }
     break;

 case CPU_DOWN_PREPARE:
     /* The notification is sent with interrupts enabled. */
-    if (likely(per_cpu(tracer, cpu))) {
-        ds_release_bts(per_cpu(tracer, cpu));
-        per_cpu(tracer, cpu) = NULL;
+    if (likely(per_cpu(hwb_tracer, cpu))) {
+        ds_release_bts(per_cpu(hwb_tracer, cpu));
+        per_cpu(hwb_tracer, cpu) = NULL;
     }
 }

@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)

     get_online_cpus();
     for_each_online_cpu(cpu)
-        if (likely(per_cpu(tracer, cpu)))
-            ds_suspend_bts(per_cpu(tracer, cpu));
+        if (likely(per_cpu(hwb_tracer, cpu)))
+            ds_suspend_bts(per_cpu(hwb_tracer, cpu));
     /*
      * We need to collect the trace on the respective cpu since ftrace
      * implicitly adds the record for the current cpu.

@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
     on_each_cpu(trace_bts_cpu, iter->tr, 1);

     for_each_online_cpu(cpu)
-        if (likely(per_cpu(tracer, cpu)))
-            ds_resume_bts(per_cpu(tracer, cpu));
+        if (likely(per_cpu(hwb_tracer, cpu)))
+            ds_resume_bts(per_cpu(hwb_tracer, cpu));
     put_online_cpus();
 }
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
mm/allocpercpu.c (177 changed lines)

@@ -1,177 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <asm/sections.h>
-
-#ifndef cache_line_size
-#define cache_line_size()    L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
-    struct percpu_data *pdata = __percpu_disguise(__pdata);
-
-    kfree(pdata->ptrs[cpu]);
-    pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
-    int cpu;
-    for_each_cpu_mask_nr(cpu, *mask)
-        percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
-    __percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
-    struct percpu_data *pdata = __percpu_disguise(__pdata);
-    int node = cpu_to_node(cpu);
-
-    /*
-     * We should make sure each CPU gets private memory.
-     */
-    size = roundup(size, cache_line_size());
-
-    BUG_ON(pdata->ptrs[cpu]);
-    if (node_online(node))
-        pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
-    else
-        pdata->ptrs[cpu] = kzalloc(size, gfp);
-    return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-                                  cpumask_t *mask)
-{
-    cpumask_t populated;
-    int cpu;
-
-    cpus_clear(populated);
-    for_each_cpu_mask_nr(cpu, *mask)
-        if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-            __percpu_depopulate_mask(__pdata, &populated);
-            return -ENOMEM;
-        } else
-            cpu_set(cpu, populated);
-    return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
-    __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area.  Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
-    /*
-     * We allocate whole cache lines to avoid false sharing
-     */
-    size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-    void *pdata = kzalloc(sz, GFP_KERNEL);
-    void *__pdata = __percpu_disguise(pdata);
-
-    /*
-     * Can't easily make larger alignment work with kmalloc.  WARN
-     * on it.  Larger alignment should only be used for module
-     * percpu sections on SMP for which this path isn't used.
-     */
-    WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
-    if (unlikely(!pdata))
-        return NULL;
-    if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
-                                       &cpu_possible_map)))
-        return __pdata;
-    kfree(pdata);
-    return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
-    if (unlikely(!__pdata))
-        return;
-    __percpu_depopulate_mask(__pdata, cpu_possible_mask);
-    kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
-
-/*
- * Generic percpu area setup.
- */
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-void __init setup_per_cpu_areas(void)
-{
-    unsigned long size, i;
-    char *ptr;
-    unsigned long nr_possible_cpus = num_possible_cpus();
-
-    /* Copy section for each CPU (we discard the original) */
-    size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-    ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-    for_each_possible_cpu(i) {
-        __per_cpu_offset[i] = ptr - __per_cpu_start;
-        memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-        ptr += size;
-    }
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
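
Seen from a caller, the deleted allocator worked like this: __alloc_percpu() handed back a disguised pointer to a kzalloc'd array of per-CPU pointers (pdata->ptrs[cpu]), each entry a separately allocated, cache-line-rounded buffer on that CPU's node, and per_cpu_ptr() indexed into that array. A hedged usage sketch of that old scheme (the pkt_stats structure and helpers are invented):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct pkt_stats {
    unsigned long rx;
    unsigned long tx;
};

static struct pkt_stats *stats;     /* one instance per possible CPU */

static int pkt_stats_init(void)
{
    stats = __alloc_percpu(sizeof(struct pkt_stats),
                           __alignof__(struct pkt_stats));
    if (!stats)
        return -ENOMEM;
    return 0;
}

static void pkt_stats_account_rx(void)
{
    int cpu = get_cpu();    /* stay on this CPU while touching our copy */

    /* dereference this CPU's separately allocated instance */
    per_cpu_ptr(stats, cpu)->rx++;
    put_cpu();
}

static void pkt_stats_exit(void)
{
    free_percpu(stats);
}

The same entry points are now provided by the unified allocator in mm/percpu.c (next file), which is why call sites like this did not need to change when the file was removed.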
mm/percpu.c (24 changed lines)

@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default

@@ -74,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>

 #define PCPU_SLOT_BASE_SHIFT    5   /* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC      16  /* start a map with 16 ents */

@@ -1302,6 +1301,27 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);

+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address.  The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+    if ((unsigned long)addr < VMALLOC_START ||
+        (unsigned long)addr >= VMALLOC_END)
+        return __pa(addr);
+    else
+        return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
                                         size_t reserved_size,
                                         ssize_t *dyn_sizep)
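
per_cpu_ptr_to_phys(), added above, translates an address that is already dereferenceable through the percpu accessors into a physical address, using __pa() when the address lies in the kernel linear mapping and vmalloc_to_page() when the percpu area is vmap-mapped. A usage sketch (the my_note variable and the reporting loop are invented, not from this patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct my_note {
    unsigned long data[8];
};

static DEFINE_PER_CPU(struct my_note, my_note);

/* Report where each CPU's copy of "my_note" actually lives in RAM. */
static void my_note_report_phys(void)
{
    int cpu;

    for_each_possible_cpu(cpu) {
        phys_addr_t pa = per_cpu_ptr_to_phys(&per_cpu(my_note, cpu));

        pr_info("cpu%d: my_note at %llx\n", cpu,
                (unsigned long long)pa);
    }
}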
mm/slab.c (18 changed lines)

@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;

-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {

@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);

 static void init_reap_node(int cpu)
 {

@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
     if (node == MAX_NUMNODES)
         node = first_node(node_online_map);

-    per_cpu(reap_node, cpu) = node;
+    per_cpu(slab_reap_node, cpu) = node;
 }

 static void next_reap_node(void)
 {
-    int node = __get_cpu_var(reap_node);
+    int node = __get_cpu_var(slab_reap_node);

     node = next_node(node, node_online_map);
     if (unlikely(node >= MAX_NUMNODES))
         node = first_node(node_online_map);
-    __get_cpu_var(reap_node) = node;
+    __get_cpu_var(slab_reap_node) = node;
 }

 #else

@@ -875,7 +875,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-    struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+    struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

     /*
      * When this gets called from do_initcalls via cpucache_init(),

@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-    int node = __get_cpu_var(reap_node);
+    int node = __get_cpu_var(slab_reap_node);

     if (l3->alien) {
         struct array_cache *ac = l3->alien[node];

@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
          * anything expensive but will only modify reap_work
          * and reschedule the timer.
          */
-        cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+        cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
         /* Now the cache_reaper is guaranteed to be not running. */
-        per_cpu(reap_work, cpu).work.func = NULL;
+        per_cpu(slab_reap_work, cpu).work.func = NULL;
         break;
     case CPU_DOWN_FAILED:
     case CPU_DOWN_FAILED_FROZEN:
@@ -761,7 +761,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
     spin_lock(&vbq->lock);
     list_add(&vb->free_list, &vbq->free);
     spin_unlock(&vbq->lock);
-    put_cpu_var(vmap_cpu_blocks);
+    put_cpu_var(vmap_block_queue);

     return vb;
 }

@@ -826,7 +826,7 @@ again:
         }
         spin_unlock(&vb->lock);
     }
-    put_cpu_var(vmap_cpu_blocks);
+    put_cpu_var(vmap_block_queue);
     rcu_read_unlock();

     if (!addr) {
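
The two mm/vmalloc.c lines above fix a mismatched pair: put_cpu_var() was being called on a name (vmap_cpu_blocks) that does not exist, while the matching get_cpu_var() earlier in the same functions (not shown in these hunks) uses vmap_block_queue; it presumably only compiled because put_cpu_var() ignored its argument at the time. The safe habit is to pass the identical variable name to both halves. A kernel-style sketch with an invented per-cpu queue (my_queue), assuming its lock and list are initialised at boot:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct my_queue {
    spinlock_t lock;
    struct list_head items;
};

static DEFINE_PER_CPU(struct my_queue, my_queue);

static void my_queue_add(struct list_head *item)
{
    struct my_queue *q;

    q = &get_cpu_var(my_queue);     /* disables preemption */
    spin_lock(&q->lock);
    list_add(item, &q->items);
    spin_unlock(&q->lock);
    put_cpu_var(my_queue);          /* same name as the get_cpu_var() above */
}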
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w)

 static void __cpuinit start_cpu_timer(int cpu)
 {
-    struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+    struct delayed_work *work = &per_cpu(vmstat_work, cpu);

-    INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-    schedule_delayed_work_on(cpu, vmstat_work,
-        __round_jiffies_relative(HZ, cpu));
+    INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+    schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }

 /*
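
The mm/vmstat.c hunk renames only the local variable: once per-cpu symbols lose their per_cpu__ prefix, a local named vmstat_work would shadow the per-cpu vmstat_work inside the per_cpu() expression on the same line, so the local becomes plain work. The resulting per-CPU deferrable-work setup looks like this sketch (the names my_work and my_update are invented):

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct delayed_work, my_work);

static void my_update(struct work_struct *w)
{
    /* ... refresh this CPU's statistics ... */
}

static void __cpuinit my_start_cpu_timer(int cpu)
{
    /* keep the local's name distinct from the per-cpu symbol */
    struct delayed_work *work = &per_cpu(my_work, cpu);

    INIT_DELAYED_WORK_DEFERRABLE(work, my_update);
    schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}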