Mirror of https://github.com/adulau/aha.git, synced 2024-12-05 00:17:23 +00:00
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (34 commits)
  m68k: rename global variable vmalloc_end to m68k_vmalloc_end
  percpu: add missing per_cpu_ptr_to_phys() definition for UP
  percpu: Fix kdump failure if booted with percpu_alloc=page
  percpu: make misc percpu symbols unique
  percpu: make percpu symbols in ia64 unique
  percpu: make percpu symbols in powerpc unique
  percpu: make percpu symbols in x86 unique
  percpu: make percpu symbols in xen unique
  percpu: make percpu symbols in cpufreq unique
  percpu: make percpu symbols in oprofile unique
  percpu: make percpu symbols in tracer unique
  percpu: make percpu symbols under kernel/ and mm/ unique
  percpu: remove some sparse warnings
  percpu: make alloc_percpu() handle array types
  vmalloc: fix use of non-existent percpu variable in put_cpu_var()
  this_cpu: Use this_cpu_xx in trace_functions_graph.c
  this_cpu: Use this_cpu_xx for ftrace
  this_cpu: Use this_cpu_xx in nmi handling
  this_cpu: Use this_cpu operations in RCU
  this_cpu: Use this_cpu ops for VM statistics
  ...

Fix up trivial (famous last words) global per-cpu naming conflicts in
  arch/x86/kvm/svm.c
  mm/slab.c
This commit is contained in: commit d0316554d3
79 changed files with 1222 additions and 978 deletions
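The bulk of the diff below is mechanical: per-CPU variables whose names read like ordinary globals (cpu_info, vmalloc_end, timer_irq, pvr, iic, dtl, runstate, ...) gain an arch- or subsystem-specific prefix so their symbols can no longer collide with unrelated globals, and a few open-coded local_t/__get_cpu_var() sequences become this_cpu operations. As orientation, here is a minimal sketch of both patterns. It is not taken from this commit: the variable demo_event_count and the two helpers are invented for illustration, and the sketch is written against the later form of the interface (the tree this merge targets still wraps static per-CPU names in per_cpu_var() at this_cpu call sites, as the sparc64 NMI hunk at the end shows).

#include <linux/percpu.h>

/* Prefixed name: the generated per-CPU symbol cannot collide with an
 * unrelated global called "event_count" elsewhere in the kernel. */
static DEFINE_PER_CPU(unsigned long, demo_event_count);

static void demo_record_event(void)
{
	/* this_cpu_inc() bumps the calling CPU's copy in one
	 * preemption-safe step, replacing the older
	 * get_cpu_var()/put_cpu_var() or local_t sequences. */
	this_cpu_inc(demo_event_count);
}

static unsigned long demo_read_event_count(int cpu)
{
	/* per_cpu() still addresses one specific CPU's copy by number. */
	return per_cpu(demo_event_count, cpu);
}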

@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
bool
default y

config HAVE_LEGACY_PER_CPU_AREA
def_bool y

config HAVE_SETUP_PER_CPU_AREA
def_bool y

@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);

#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
extern unsigned long vmalloc_end;
extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
extern int find_largest_hole(u64 start, u64 end, void *arg);
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);

@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END vmalloc_end
extern unsigned long vmalloc_end;
extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */

@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
* The "local" data variable. It refers to the per-CPU data of the currently executing

@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
#define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);

@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
printk(KERN_ERR PREFIX
"Error parsing MADT - no LAPIC entries\n");

#ifdef CONFIG_SMP
if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
smp_boot_data.cpu_phys_id[available_cpus] =
hard_smp_processor_id();
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = available_cpus;
#endif
/* Make boot-up look pretty */
printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
total_cpus);

return 0;
}


int __init acpi_boot_init(void)
{

@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
printk(KERN_ERR PREFIX "Can't find FADT\n");

#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_SMP
if (available_cpus == 0) {
printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
smp_boot_data.cpu_phys_id[available_cpus] =
hard_smp_processor_id();
available_cpus = 1; /* We've got at least one of these, no? */
}
smp_boot_data.cpu_count = available_cpus;

smp_build_cpu_map();
# ifdef CONFIG_ACPI_NUMA
if (srat_num_cpus == 0) {
int cpu, i = 1;
for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)

@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
node_cpuid[i++].phys_id =
smp_boot_data.cpu_phys_id[cpu];
}
# endif
#endif
#ifdef CONFIG_ACPI_NUMA
build_cpu_to_node_map();
#endif
/* Make boot-up look pretty */
printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
total_cpus);
return 0;
}

@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
* intermediate precision so that we can produce a full 64-bit result.
*/
GLOBAL_ENTRY(ia64_native_sched_clock)
addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;;
ldf8 f8=[r8]

@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
;;
ldf8 f8=[r8]
;;

@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif

#include <asm/processor.h>
EXPORT_SYMBOL(per_cpu__cpu_info);
EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
#endif

@@ -59,7 +59,7 @@
ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET

GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2

@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)

// purge all TC entries
#define O(member) IA64_CPUINFO_##member##_OFFSET
GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2

@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;

@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
#ifdef CONFIG_ACPI_HOTPLUG_CPU
# ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
#endif
# endif
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
32 : cpus_weight(early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
# endif
#else
# ifdef CONFIG_SMP
smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_APCI_BOOT */

#ifdef CONFIG_SMP
smp_build_cpu_map();
#endif
find_memory();

/* process SAL system table: */

@@ -855,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}

/*
* In UP configuration, setup_per_cpu_areas() is defined in
* include/linux/percpu.h
*/
#ifdef CONFIG_SMP
void __init
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
}
#endif

/*
* Do the following calculations:
*

@@ -980,7 +967,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
*/
cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY

@@ -166,6 +166,12 @@ SECTIONS
}
#endif

#ifdef CONFIG_SMP
. = ALIGN(PERCPU_PAGE_SIZE);
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif

. = ALIGN(PAGE_SIZE);
__init_end = .;

@@ -198,11 +204,6 @@ SECTIONS
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
#ifdef CONFIG_SMP
. = ALIGN(PERCPU_PAGE_SIZE);
__cpu0_per_cpu = .;
. = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
READ_MOSTLY_DATA(SMP_CACHE_BYTES)

@@ -154,38 +154,99 @@ static void *cpu_data;
void * __cpuinit
per_cpu_init (void)
{
int cpu;
static int first_time=1;
static bool first_time = true;
void *cpu0_data = __cpu0_per_cpu;
unsigned int cpu;

if (!first_time)
goto skip;
first_time = false;

/*
* get_free_pages() cannot be used before cpu_init() done. BSP
* allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
* get_zeroed_page().
* get_free_pages() cannot be used before cpu_init() done.
* BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
* to avoid that AP calls get_zeroed_page().
*/
if (first_time) {
void *cpu0_data = __cpu0_per_cpu;
for_each_possible_cpu(cpu) {
void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

first_time=0;
memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
/*
* percpu area for cpu0 is moved from the __init area
* which is setup by head.S and used till this point.
* Update ar.k3. This move is ensures that percpu
* area for cpu0 is on the correct node and its
* virtual address isn't insanely far from other
* percpu areas which is important for congruent
* percpu allocator.
*/
if (cpu == 0)
ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
(unsigned long)__per_cpu_start);

for (cpu = 1; cpu < NR_CPUS; cpu++) {
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
}
cpu_data += PERCPU_PAGE_SIZE;
}
skip:
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
* setup_per_cpu_areas - setup percpu areas
*
* Arch code has already allocated and initialized percpu areas. All
* this function has to do is to teach the determined layout to the
* dynamic percpu allocator, which happens to be more complex than
* creating whole new ones using helpers.
*/
void __init
setup_per_cpu_areas(void)
{
struct pcpu_alloc_info *ai;
struct pcpu_group_info *gi;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
int rc;

ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
if (!ai)
panic("failed to allocate pcpu_alloc_info");
gi = &ai->groups[0];

/* units are assigned consecutively to possible cpus */
for_each_possible_cpu(cpu)
gi->cpu_map[gi->nr_units++] = cpu;

/* set parameters */
static_size = __per_cpu_end - __per_cpu_start;
reserved_size = PERCPU_MODULE_RESERVE;
dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
if (dyn_size < 0)
panic("percpu area overflow static=%zd reserved=%zd\n",
static_size, reserved_size);

ai->static_size = static_size;
ai->reserved_size = reserved_size;
ai->dyn_size = dyn_size;
ai->unit_size = PERCPU_PAGE_SIZE;
ai->atom_size = PAGE_SIZE;
ai->alloc_size = PERCPU_PAGE_SIZE;

rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
if (rc)
panic("failed to setup percpu area (err=%d)", rc);

pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

@@ -270,8 +331,8 @@ paging_init (void)

map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
vmalloc_end -= map_size;
vmem_map = (struct page *) vmalloc_end;
VMALLOC_END -= map_size;
vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);

/*

@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
int cpu;

for_each_possible_early_cpu(cpu) {
if (cpu == 0) {
void *cpu0_data = __cpu0_per_cpu;
__per_cpu_offset[cpu] = (char*)cpu0_data -
__per_cpu_start;
} else if (node == node_cpuid[cpu].nid) {
memcpy(__va(cpu_data), __phys_per_cpu_start,
__per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
__per_cpu_start;
cpu_data += PERCPU_PAGE_SIZE;
}
void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

if (node != node_cpuid[cpu].nid)
continue;

memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
__per_cpu_start;

/*
* percpu area for cpu0 is moved from the __init area
* which is setup by head.S and used till this point.
* Update ar.k3. This move is ensures that percpu
* area for cpu0 is on the correct node and its
* virtual address isn't insanely far from other
* percpu areas which is important for congruent
* percpu allocator.
*/
if (cpu == 0)
ia64_set_kr(IA64_KR_PER_CPU_DATA,
(unsigned long)cpu_data -
(unsigned long)__per_cpu_start);

cpu_data += PERCPU_PAGE_SIZE;
}
#endif
return cpu_data;
}

#ifdef CONFIG_SMP
/**
* setup_per_cpu_areas - setup percpu areas
*
* Arch code has already allocated and initialized percpu areas. All
* this function has to do is to teach the determined layout to the
* dynamic percpu allocator, which happens to be more complex than
* creating whole new ones using helpers.
*/
void __init setup_per_cpu_areas(void)
{
struct pcpu_alloc_info *ai;
struct pcpu_group_info *uninitialized_var(gi);
unsigned int *cpu_map;
void *base;
unsigned long base_offset;
unsigned int cpu;
ssize_t static_size, reserved_size, dyn_size;
int node, prev_node, unit, nr_units, rc;

ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
if (!ai)
panic("failed to allocate pcpu_alloc_info");
cpu_map = ai->groups[0].cpu_map;

/* determine base */
base = (void *)ULONG_MAX;
for_each_possible_cpu(cpu)
base = min(base,
(void *)(__per_cpu_offset[cpu] + __per_cpu_start));
base_offset = (void *)__per_cpu_start - base;

/* build cpu_map, units are grouped by node */
unit = 0;
for_each_node(node)
for_each_possible_cpu(cpu)
if (node == node_cpuid[cpu].nid)
cpu_map[unit++] = cpu;
nr_units = unit;

/* set basic parameters */
static_size = __per_cpu_end - __per_cpu_start;
reserved_size = PERCPU_MODULE_RESERVE;
dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
if (dyn_size < 0)
panic("percpu area overflow static=%zd reserved=%zd\n",
static_size, reserved_size);

ai->static_size = static_size;
ai->reserved_size = reserved_size;
ai->dyn_size = dyn_size;
ai->unit_size = PERCPU_PAGE_SIZE;
ai->atom_size = PAGE_SIZE;
ai->alloc_size = PERCPU_PAGE_SIZE;

/*
* CPUs are put into groups according to node. Walk cpu_map
* and create new groups at node boundaries.
*/
prev_node = -1;
ai->nr_groups = 0;
for (unit = 0; unit < nr_units; unit++) {
cpu = cpu_map[unit];
node = node_cpuid[cpu].nid;

if (node == prev_node) {
gi->nr_units++;
continue;
}
prev_node = node;

gi = &ai->groups[ai->nr_groups++];
gi->nr_units = 1;
gi->base_offset = __per_cpu_offset[cpu] + base_offset;
gi->cpu_map = &cpu_map[unit];
}

rc = pcpu_setup_first_chunk(ai, base);
if (rc)
panic("failed to setup percpu area (err=%d)", rc);

pcpu_free_alloc_info(ai);
}
#endif

/**
* fill_pernode - initialize pernode data.
* @node: the node id.

@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
/* Set the node_data pointer for each per-cpu struct */
for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
per_cpu(ia64_cpu_info, cpu).node_data =
mem_data[node].node_data;
}
#else
{

@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
((char *)&per_cpu__cpu_info - __per_cpu_start));
((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */

@@ -666,9 +765,9 @@ void __init paging_init(void)
sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
vmem_map = (struct page *) vmalloc_end;
vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}

@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
}


static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static DEFINE_PER_CPU(int, cmc_irq) = -1;
static DEFINE_PER_CPU(int, cmcp_irq) = -1;
static DEFINE_PER_CPU(int, cpep_irq) = -1;
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE

struct saved_irq {

@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
if (xen_slab_ready) {
switch (vec) {
case IA64_TIMER_VECTOR:
snprintf(per_cpu(timer_name, cpu),
sizeof(per_cpu(timer_name, cpu)),
snprintf(per_cpu(xen_timer_name, cpu),
sizeof(per_cpu(xen_timer_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
per_cpu(timer_name, cpu), action->dev_id);
per_cpu(timer_irq, cpu) = irq;
per_cpu(xen_timer_name, cpu), action->dev_id);
per_cpu(xen_timer_irq, cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
snprintf(per_cpu(resched_name, cpu),
sizeof(per_cpu(resched_name, cpu)),
snprintf(per_cpu(xen_resched_name, cpu),
sizeof(per_cpu(xen_resched_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
per_cpu(resched_name, cpu), action->dev_id);
per_cpu(resched_irq, cpu) = irq;
per_cpu(xen_resched_name, cpu), action->dev_id);
per_cpu(xen_resched_irq, cpu) = irq;
break;
case IA64_IPI_VECTOR:
snprintf(per_cpu(ipi_name, cpu),
sizeof(per_cpu(ipi_name, cpu)),
snprintf(per_cpu(xen_ipi_name, cpu),
sizeof(per_cpu(xen_ipi_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
action->handler, action->flags,
per_cpu(ipi_name, cpu), action->dev_id);
per_cpu(ipi_irq, cpu) = irq;
per_cpu(xen_ipi_name, cpu), action->dev_id);
per_cpu(xen_ipi_irq, cpu) = irq;
break;
case IA64_CMC_VECTOR:
snprintf(per_cpu(cmc_name, cpu),
sizeof(per_cpu(cmc_name, cpu)),
snprintf(per_cpu(xen_cmc_name, cpu),
sizeof(per_cpu(xen_cmc_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
action->handler,
action->flags,
per_cpu(cmc_name, cpu),
action->dev_id);
per_cpu(cmc_irq, cpu) = irq;
action->handler,
action->flags,
per_cpu(xen_cmc_name, cpu),
action->dev_id);
per_cpu(xen_cmc_irq, cpu) = irq;
break;
case IA64_CMCP_VECTOR:
snprintf(per_cpu(cmcp_name, cpu),
sizeof(per_cpu(cmcp_name, cpu)),
snprintf(per_cpu(xen_cmcp_name, cpu),
sizeof(per_cpu(xen_cmcp_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
action->handler,
action->flags,
per_cpu(cmcp_name, cpu),
action->dev_id);
per_cpu(cmcp_irq, cpu) = irq;
action->handler,
action->flags,
per_cpu(xen_cmcp_name, cpu),
action->dev_id);
per_cpu(xen_cmcp_irq, cpu) = irq;
break;
case IA64_CPEP_VECTOR:
snprintf(per_cpu(cpep_name, cpu),
sizeof(per_cpu(cpep_name, cpu)),
snprintf(per_cpu(xen_cpep_name, cpu),
sizeof(per_cpu(xen_cpep_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
action->handler,
action->flags,
per_cpu(cpep_name, cpu),
action->dev_id);
per_cpu(cpep_irq, cpu) = irq;
action->handler,
action->flags,
per_cpu(xen_cpep_name, cpu),
action->dev_id);
per_cpu(xen_cpep_irq, cpu) = irq;
break;
case IA64_CPE_VECTOR:
case IA64_MCA_RENDEZ_VECTOR:

@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,

if (action == CPU_DEAD) {
/* Unregister evtchn. */
if (per_cpu(cpep_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
per_cpu(cpep_irq, cpu) = -1;
if (per_cpu(xen_cpep_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
NULL);
per_cpu(xen_cpep_irq, cpu) = -1;
}
if (per_cpu(cmcp_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
per_cpu(cmcp_irq, cpu) = -1;
if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
NULL);
per_cpu(xen_cmcp_irq, cpu) = -1;
}
if (per_cpu(cmc_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
per_cpu(cmc_irq, cpu) = -1;
if (per_cpu(xen_cmc_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
per_cpu(xen_cmc_irq, cpu) = -1;
}
if (per_cpu(ipi_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
per_cpu(ipi_irq, cpu) = -1;
if (per_cpu(xen_ipi_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
per_cpu(xen_ipi_irq, cpu) = -1;
}
if (per_cpu(resched_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(resched_irq, cpu),
NULL);
per_cpu(resched_irq, cpu) = -1;
if (per_cpu(xen_resched_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
NULL);
per_cpu(xen_resched_irq, cpu) = -1;
}
if (per_cpu(timer_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
per_cpu(timer_irq, cpu) = -1;
if (per_cpu(xen_timer_irq, cpu) >= 0) {
unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
NULL);
per_cpu(xen_timer_irq, cpu) = -1;
}
}
return NOTIFY_OK;

@@ -34,15 +34,15 @@

#include "../kernel/fsyscall_gtod_data.h"

DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
DEFINE_PER_CPU(unsigned long, processed_stolen_time);
DEFINE_PER_CPU(unsigned long, processed_blocked_time);
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
static DEFINE_PER_CPU(unsigned long, xen_blocked_time);

/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
struct vcpu_register_runstate_memory_area area;
struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
int rc;

memset(runstate, 0, sizeof(*runstate));

@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
&area);
WARN_ON(rc && rc != -ENOSYS);

per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ runstate->time[RUNSTATE_offline];
}

@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)

BUG_ON(preemptible());

state = &__get_cpu_var(runstate);
state = &__get_cpu_var(xen_runstate);

/*
* The runstate info is always updated by the hypervisor on

@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
* This function just checks and reject this effect.
*/
if (!time_after_eq(runstate.time[RUNSTATE_blocked],
per_cpu(processed_blocked_time, cpu)))
per_cpu(xen_blocked_time, cpu)))
blocked = 0;

if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
runstate.time[RUNSTATE_offline],
per_cpu(processed_stolen_time, cpu)))
per_cpu(xen_stolen_time, cpu)))
stolen = 0;

if (!time_after(delta_itm + new_itm, ia64_get_itc()))

@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
} else {
local_cpu_data->itm_next = delta_itm + new_itm;
}
per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
return delta_itm;
}

@@ -83,9 +83,9 @@
#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END KMAP_START
#else
extern unsigned long vmalloc_end;
extern unsigned long m68k_vmalloc_end;
#define VMALLOC_START 0x0f800000
#define VMALLOC_END vmalloc_end
#define VMALLOC_END m68k_vmalloc_end
#endif /* CONFIG_SUN3 */

/* zero page used for uninitialized stuff */

@@ -45,8 +45,8 @@
** Globals
*/

unsigned long vmalloc_end;
EXPORT_SYMBOL(vmalloc_end);
unsigned long m68k_vmalloc_end;
EXPORT_SYMBOL(m68k_vmalloc_end);

unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];

@@ -172,8 +172,8 @@ void mmu_emu_init(unsigned long bootmem_end)
#endif
// the lowest mapping here is the end of our
// vmalloc region
if(!vmalloc_end)
vmalloc_end = seg;
if (!m68k_vmalloc_end)
m68k_vmalloc_end = seg;

// mark the segmap alloc'd, and reserve any
// of the first 0xbff pages the hardware is

@@ -31,13 +31,13 @@ const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
#define KPROBE_HIT_ACTIVE 0x00000001
#define KPROBE_HIT_SS 0x00000002

static struct kprobe *current_kprobe;
static unsigned long current_kprobe_orig_pc;
static unsigned long current_kprobe_next_pc;
static int current_kprobe_ss_flags;
static struct kprobe *cur_kprobe;
static unsigned long cur_kprobe_orig_pc;
static unsigned long cur_kprobe_next_pc;
static int cur_kprobe_ss_flags;
static unsigned long kprobe_status;
static kprobe_opcode_t current_kprobe_ss_buf[MAX_INSN_SIZE + 2];
static unsigned long current_kprobe_bp_addr;
static kprobe_opcode_t cur_kprobe_ss_buf[MAX_INSN_SIZE + 2];
static unsigned long cur_kprobe_bp_addr;

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;

@@ -399,26 +399,25 @@ void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
unsigned long nextpc;

current_kprobe_orig_pc = regs->pc;
memcpy(current_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
regs->pc = (unsigned long) current_kprobe_ss_buf;
cur_kprobe_orig_pc = regs->pc;
memcpy(cur_kprobe_ss_buf, &p->ainsn.insn[0], MAX_INSN_SIZE);
regs->pc = (unsigned long) cur_kprobe_ss_buf;

nextpc = find_nextpc(regs, &current_kprobe_ss_flags);
if (current_kprobe_ss_flags & SINGLESTEP_PCREL)
current_kprobe_next_pc =
current_kprobe_orig_pc + (nextpc - regs->pc);
nextpc = find_nextpc(regs, &cur_kprobe_ss_flags);
if (cur_kprobe_ss_flags & SINGLESTEP_PCREL)
cur_kprobe_next_pc = cur_kprobe_orig_pc + (nextpc - regs->pc);
else
current_kprobe_next_pc = nextpc;
cur_kprobe_next_pc = nextpc;

/* branching instructions need special handling */
if (current_kprobe_ss_flags & SINGLESTEP_BRANCH)
if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH)
nextpc = singlestep_branch_setup(regs);

current_kprobe_bp_addr = nextpc;
cur_kprobe_bp_addr = nextpc;

*(u8 *) nextpc = BREAKPOINT_INSTRUCTION;
mn10300_dcache_flush_range2((unsigned) current_kprobe_ss_buf,
sizeof(current_kprobe_ss_buf));
mn10300_dcache_flush_range2((unsigned) cur_kprobe_ss_buf,
sizeof(cur_kprobe_ss_buf));
mn10300_icache_inv();
}

@@ -440,7 +439,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
disarm_kprobe(p, regs);
ret = 1;
} else {
p = current_kprobe;
p = cur_kprobe;
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}

@@ -464,7 +463,7 @@ static inline int __kprobes kprobe_handler(struct pt_regs *regs)
}

kprobe_status = KPROBE_HIT_ACTIVE;
current_kprobe = p;
cur_kprobe = p;
if (p->pre_handler(p, regs)) {
/* handler has already set things up, so skip ss setup */
return 1;

@@ -491,8 +490,8 @@ no_kprobe:
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
/* we may need to fixup regs/stack after singlestepping a call insn */
if (current_kprobe_ss_flags & SINGLESTEP_BRANCH) {
regs->pc = current_kprobe_orig_pc;
if (cur_kprobe_ss_flags & SINGLESTEP_BRANCH) {
regs->pc = cur_kprobe_orig_pc;
switch (p->ainsn.insn[0]) {
case 0xcd: /* CALL (d16,PC) */
*(unsigned *) regs->sp = regs->mdr = regs->pc + 5;

@@ -523,8 +522,8 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
}
}

regs->pc = current_kprobe_next_pc;
current_kprobe_bp_addr = 0;
regs->pc = cur_kprobe_next_pc;
cur_kprobe_bp_addr = 0;
}

static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)

@@ -532,10 +531,10 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
if (!kprobe_running())
return 0;

if (current_kprobe->post_handler)
current_kprobe->post_handler(current_kprobe, regs, 0);
if (cur_kprobe->post_handler)
cur_kprobe->post_handler(cur_kprobe, regs, 0);

resume_execution(current_kprobe, regs);
resume_execution(cur_kprobe, regs);
reset_current_kprobe();
preempt_enable_no_resched();
return 1;

@@ -545,12 +544,12 @@ static inline int __kprobes post_kprobe_handler(struct pt_regs *regs)
static inline
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
if (current_kprobe->fault_handler &&
current_kprobe->fault_handler(current_kprobe, regs, trapnr))
if (cur_kprobe->fault_handler &&
cur_kprobe->fault_handler(cur_kprobe, regs, trapnr))
return 1;

if (kprobe_status & KPROBE_HIT_SS) {
resume_execution(current_kprobe, regs);
resume_execution(cur_kprobe, regs);
reset_current_kprobe();
preempt_enable_no_resched();
}

@@ -567,7 +566,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,

switch (val) {
case DIE_BREAKPOINT:
if (current_kprobe_bp_addr != args->regs->pc) {
if (cur_kprobe_bp_addr != args->regs->pc) {
if (kprobe_handler(args->regs))
return NOTIFY_STOP;
} else {

@@ -37,7 +37,7 @@ extern void cpu_die(void);
extern void smp_send_debugger_break(int cpu);
extern void smp_message_recv(int);

DECLARE_PER_CPU(unsigned int, pvr);
DECLARE_PER_CPU(unsigned int, cpu_pvr);

#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(cpumask_t map);

@@ -487,11 +487,11 @@ static void perf_callchain_user_32(struct pt_regs *regs,
* Since we can't get PMU interrupts inside a PMU interrupt handler,
* we don't need separate irq and nmi entries here.
*/
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);

entry->nr = 0;

@@ -157,7 +157,7 @@ extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, pvr);
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif

static int show_cpuinfo(struct seq_file *m, void *v)

@@ -209,7 +209,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}

#ifdef CONFIG_SMP
pvr = per_cpu(pvr, cpu_id);
pvr = per_cpu(cpu_pvr, cpu_id);
#else
pvr = mfspr(SPRN_PVR);
#endif

@@ -235,7 +235,7 @@ struct thread_info *current_set[NR_CPUS];

static void __devinit smp_store_cpu_info(int id)
{
per_cpu(pvr, id) = mfspr(SPRN_PVR);
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)

@@ -54,7 +54,7 @@ struct iic {
struct device_node *node;
};

static DEFINE_PER_CPU(struct iic, iic);
static DEFINE_PER_CPU(struct iic, cpu_iic);
#define IIC_NODE_COUNT 2
static struct irq_host *iic_host;

@@ -82,7 +82,7 @@ static void iic_unmask(unsigned int irq)

static void iic_eoi(unsigned int irq)
{
struct iic *iic = &__get_cpu_var(iic);
struct iic *iic = &__get_cpu_var(cpu_iic);
out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
BUG_ON(iic->eoi_ptr < 0);
}

@@ -146,7 +146,7 @@ static unsigned int iic_get_irq(void)
struct iic *iic;
unsigned int virq;

iic = &__get_cpu_var(iic);
iic = &__get_cpu_var(cpu_iic);
*(unsigned long *) &pending =
in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))

@@ -161,12 +161,12 @@ static unsigned int iic_get_irq(void)

void iic_setup_cpu(void)
{
out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
}

u8 iic_get_target_id(int cpu)
{
return per_cpu(iic, cpu).target_id;
return per_cpu(cpu_iic, cpu).target_id;
}

EXPORT_SYMBOL_GPL(iic_get_target_id);

@@ -181,7 +181,7 @@ static inline int iic_ipi_to_irq(int ipi)

void iic_cause_IPI(int cpu, int mesg)
{
out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
}

struct irq_host *iic_get_irq_host(int node)

@@ -348,7 +348,7 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
/* XXX FIXME: should locate the linux CPU number from the HW cpu
* number properly. We are lucky for now
*/
struct iic *iic = &per_cpu(iic, hw_cpu);
struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
BUG_ON(iic->regs == NULL);

@@ -54,7 +54,7 @@ struct dtl {
int buf_entries;
u64 last_idx;
};
static DEFINE_PER_CPU(struct dtl, dtl);
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
* Dispatch trace log event mask:

@@ -261,7 +261,7 @@ static int dtl_init(void)

/* set up the per-cpu log structures */
for_each_possible_cpu(i) {
struct dtl *dtl = &per_cpu(dtl, i);
struct dtl *dtl = &per_cpu(cpu_dtl, i);
dtl->cpu = i;

rc = dtl_setup_file(dtl);

@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;

static DEFINE_PER_CPU(unsigned int, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(long, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)

@@ -112,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
local_inc(&__get_cpu_var(alert_counter));
if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
__this_cpu_inc(per_cpu_var(alert_counter));
if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)