mirror of https://github.com/adulau/aha.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (39 commits)
  cpumask: Move deprecated functions to end of header.
  cpumask: remove unused deprecated functions, avoid accusations of insanity
  cpumask: use new-style cpumask ops in mm/quicklist.
  cpumask: use mm_cpumask() wrapper: x86
  cpumask: use mm_cpumask() wrapper: um
  cpumask: use mm_cpumask() wrapper: mips
  cpumask: use mm_cpumask() wrapper: mn10300
  cpumask: use mm_cpumask() wrapper: m32r
  cpumask: use mm_cpumask() wrapper: arm
  cpumask: Use accessors for cpu_*_mask: um
  cpumask: Use accessors for cpu_*_mask: powerpc
  cpumask: Use accessors for cpu_*_mask: mips
  cpumask: Use accessors for cpu_*_mask: m32r
  cpumask: remove arch_send_call_function_ipi
  cpumask: arch_send_call_function_ipi_mask: s390
  cpumask: arch_send_call_function_ipi_mask: powerpc
  cpumask: arch_send_call_function_ipi_mask: mips
  cpumask: arch_send_call_function_ipi_mask: m32r
  cpumask: arch_send_call_function_ipi_mask: alpha
  cpumask: remove obsolete topology_core_siblings and topology_thread_siblings: ia64
  ...
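The recurring pattern in this series is converting callers from the old by-value cpumask_t operations to the new pointer-based struct cpumask API. A minimal C sketch of the conversion; the surrounding helper names are illustrative, only the cpumask calls themselves are the real old/new kernel APIs:

#include <linux/cpumask.h>
#include <linux/mm_types.h>

/* Hypothetical IPI helper: the old API passed the whole NR_CPUS-bit   */
/* mask by value; the new API passes a const pointer and uses          */
/* accessors instead of poking struct fields directly.                 */
static void send_ipi_new(const struct cpumask *mask, int op);

static void convert_example(struct mm_struct *mm, int cpu)
{
        /* old: cpu_set(cpu, mm->cpu_vm_mask);        */
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        /* old: send_ipi_old(cpumask_of_cpu(cpu), 0); */
        send_ipi_new(cpumask_of(cpu), 0);
}
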
commit 94a8d5caba
77 changed files with 403 additions and 724 deletions
@@ -47,7 +47,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 extern int smp_num_cpus;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #else /* CONFIG_SMP */

@@ -22,23 +22,6 @@ static inline int cpu_to_node(int cpu)
 	return node;
 }
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-	cpumask_t node_cpu_mask = CPU_MASK_NONE;
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		if (cpu_to_node(cpu) == node)
-			cpu_set(cpu, node_cpu_mask);
-	}
-
-#ifdef DEBUG_NUMA
-	printk("node %d: cpu_mask: %016lx\n", node, node_cpu_mask);
-#endif
-
-	return node_cpu_mask;
-}
-
 extern struct cpumask node_to_cpumask_map[];
 /* FIXME: This is dumb, recalculating every time. But simple. */
 static const struct cpumask *cpumask_of_node(int node)

@@ -55,7 +38,6 @@ static const struct cpumask *cpumask_of_node(int node)
 	return &node_to_cpumask_map[node];
 }
 
-#define pcibus_to_cpumask(bus)	(cpu_online_map)
 #define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 #endif /* !CONFIG_NUMA */

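Removing node_to_cpumask() means callers use cpumask_of_node(), which hands back a borrowed pointer to the arch-maintained mask instead of copying it. A hedged caller-side sketch (the helper name is hypothetical):

#include <linux/topology.h>
#include <linux/cpumask.h>

/* Hypothetical caller, showing why the pointer form avoids a copy. */
static int count_node_cpus(int node)
{
        /* old: cpumask_t mask = node_to_cpumask(node); copied NR_CPUS bits */
        const struct cpumask *mask = cpumask_of_node(node);

        return cpumask_weight(mask);    /* number of CPUs on this node */
}
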
@@ -548,16 +548,16 @@ setup_profiling_timer(unsigned int multiplier)
 
 
 static void
-send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
+send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
 	int i;
 
 	mb();
-	for_each_cpu_mask(i, to_whom)
+	for_each_cpu(i, to_whom)
 		set_bit(operation, &ipi_data[i].bits);
 
 	mb();
-	for_each_cpu_mask(i, to_whom)
+	for_each_cpu(i, to_whom)
 		wripir(i);
 }

@@ -624,7 +624,7 @@ smp_send_reschedule(int cpu)
 	printk(KERN_WARNING
 	       "smp_send_reschedule: Sending IPI to self.\n");
 #endif
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void

@@ -636,17 +636,17 @@ smp_send_stop(void)
 	if (hard_smp_processor_id() != boot_cpu_id)
 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
 #endif
-	send_ipi_message(to_whom, IPI_CPU_STOP);
+	send_ipi_message(&to_whom, IPI_CPU_STOP);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 static void

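The for_each_cpu_mask() to for_each_cpu() conversion above recurs throughout the merge. A hedged sketch of the new iterator (the function is hypothetical, shaped like send_ipi_message() above):

#include <linux/cpumask.h>
#include <linux/smp.h>

static void kick_cpus(const struct cpumask *to_whom)
{
        int i;

        /* for_each_cpu() takes a pointer and stops at nr_cpu_ids,   */
        /* unlike the old for_each_cpu_mask(), which took the struct */
        /* by value and could scan all NR_CPUS bits.                 */
        for_each_cpu(i, to_whom)
                smp_send_reschedule(i); /* stand-in for wripir(i) */
}
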
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 					vma->vm_flags);
 }

@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
 		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 	}

@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		    unsigned long uaddr, void *kaddr,
 		    unsigned long len, int write)
 {
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = (unsigned long)kaddr;
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}

@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 #ifdef CONFIG_SMP
 	/* check for possible thread migration */
-	if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+	if (!cpumask_empty(mm_cpumask(next)) &&
+	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
 		__flush_icache_all();
 #endif
-	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
-			cpu_clear(cpu, prev->cpu_vm_mask);
+			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
 #endif
 }

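The mm_cpumask() wrapper used above replaces direct access to mm->cpu_vm_mask, so the mask's storage can change later without touching every user. A hedged sketch of the accessor in isolation (the helper is hypothetical):

#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Same shape as the flush_cache_* and switch_mm conversions above. */
static bool mm_ran_here(struct mm_struct *mm)
{
        /* mm_cpumask() hides the struct layout; callers no longer  */
        /* assume cpu_vm_mask is an embedded cpumask_t field.       */
        return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
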
@@ -93,7 +93,6 @@ extern void platform_cpu_enable(unsigned int cpu);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * show local interrupt info

@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 		if (tlb_flag(TLB_V3_FULL))
 			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
 		if (tlb_flag(TLB_V4_U_FULL))

@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	if (tlb_flag(TLB_WB))
 		dsb();
 
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		if (tlb_flag(TLB_V3_PAGE))
 			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
 		if (tlb_flag(TLB_V4_U_PAGE))

@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
 	read_lock(&tasklist_lock);
 	for_each_process(p) {
 		if (p->mm)
-			cpu_clear(cpu, p->mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
 	}
 	read_unlock(&tasklist_lock);

@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
-	cpu_set(cpu, mm->cpu_vm_mask);
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	cpu_switch_mm(mm->pgd, mm);
 	enter_lazy_tlb(mm, current);
 	local_flush_tlb_all();

@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
 	else
 		local_flush_tlb_mm(mm);
 }

@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		struct tlb_args ta;
 		ta.ta_vma = vma;
 		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
 	} else
 		local_flush_tlb_page(vma, uaddr);
 }

@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 		ta.ta_vma = vma;
 		ta.ta_start = start;
 		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
 	} else
 		local_flush_tlb_range(vma, start, end);
 }

@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
 	}
 	spin_unlock(&cpu_asid_lock);
 
-	mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
 	mm->context.id = asid;
 }

@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 			__cpuc_flush_user_all();
 		return;
 	}

@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 						vma->vm_flags);
 		return;

@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 			unsigned long addr = user_addr & PAGE_MASK;
 			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 		}

@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 			unsigned long addr = (unsigned long)kaddr;
 			__cpuc_coherent_kern_range(addr, addr + len);
 		}

@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 
 	/* VIPT non-aliasing cache */
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
 	    vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
 		/* only flushing the kernel mapping on non-aliasing VIPT */

@@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #else /* CONFIG_SMP */

@@ -33,7 +33,6 @@
 /*
  * Returns a bitmask of CPUs on Node 'node'.
  */
-#define node_to_cpumask(node) (node_to_cpu_mask[node])
 #define cpumask_of_node(node) (&node_to_cpu_mask[node])
 
 /*

@@ -104,8 +103,6 @@ void build_cpu_to_node_map(void);
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
-#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define smt_capable()				(smp_num_siblings > 1)

@@ -302,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
-	smp_call_function_mask(mm->cpu_vm_mask,
+	smp_call_function_many(mm_cpumask(mm),
 		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 	local_irq_disable();
 	local_finish_flush_tlb_mm(mm);

@@ -127,7 +127,7 @@ static inline void switch_mm(struct mm_struct *prev,
 
 	if (prev != next) {
 #ifdef CONFIG_SMP
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 #endif	/* CONFIG_SMP */
 		/* Set MPTB = next->pgd */
 		*(volatile unsigned long *)MPTB = (unsigned long)next->pgd;

@@ -135,7 +135,7 @@ static inline void switch_mm(struct mm_struct *prev,
 	}
 #ifdef CONFIG_SMP
 	else
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
 			activate_context(next);
 #endif	/* CONFIG_SMP */
 }

@@ -88,7 +88,7 @@ extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif	/* not __ASSEMBLY__ */

@@ -85,7 +85,7 @@ void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
-static void send_IPI_mask(cpumask_t, int, int);
+static void send_IPI_mask(const struct cpumask *, int, int);
 unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

@@ -113,7 +113,7 @@ unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 void smp_send_reschedule(int cpu_id)
 {
 	WARN_ON(cpu_is_offline(cpu_id));
-	send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
+	send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
 }
 
 /*==========================================================================*

@@ -168,7 +168,7 @@ void smp_flush_cache_all(void)
 	spin_lock(&flushcache_lock);
 	mask=cpus_addr(cpumask);
 	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
-	send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
 		mb();

@@ -264,7 +264,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 	if (*mmc != NO_CONTEXT) {

@@ -273,7 +273,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
 		if (mm == current->mm)
 			activate_context(mm);
 		else
-			cpu_clear(cpu_id, mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
 		local_irq_restore(flags);
 	}
 	if (!cpus_empty(cpu_mask))

@@ -334,7 +334,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 	preempt_disable();
 	cpu_id = smp_processor_id();
 	mmc = &mm->context[cpu_id];
-	cpu_mask = mm->cpu_vm_mask;
+	cpu_mask = *mm_cpumask(mm);
 	cpu_clear(cpu_id, cpu_mask);
 
 #ifdef DEBUG_SMP

@@ -424,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
-	send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);
+	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
 	while (!cpus_empty(flush_cpumask)) {
 		/* nothing. lockup detection does not belong here */

@@ -469,7 +469,7 @@ void smp_invalidate_interrupt(void)
 		if (flush_mm == current->active_mm)
 			activate_context(flush_mm);
 		else
-			cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
+			cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
 	} else {
 		unsigned long va = flush_va;

@@ -546,14 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*

@@ -729,7 +729,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
 	cpumask = cpu_online_map;
 	cpu_clear(smp_processor_id(), cpumask);
 
-	send_IPI_mask(cpumask, ipi_num, try);
+	send_IPI_mask(&cpumask, ipi_num, try);
 }
 
 /*==========================================================================*

@@ -752,7 +752,7 @@ static void send_IPI_allbutself(int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
+static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
 {
 	cpumask_t physid_mask, tmp;
 	int cpu_id, phys_id;

@@ -761,11 +761,11 @@ static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
 	if (num_cpus <= 1)	/* NO MP */
 		return;
 
-	cpus_and(tmp, cpumask, cpu_online_map);
-	BUG_ON(!cpus_equal(cpumask, tmp));
+	cpumask_and(&tmp, cpumask, cpu_online_mask);
+	BUG_ON(!cpumask_equal(cpumask, &tmp));
 
 	physid_mask = CPU_MASK_NONE;
-	for_each_cpu_mask(cpu_id, cpumask){
+	for_each_cpu(cpu_id, cpumask) {
 		if ((phys_id = cpu_to_physid(cpu_id)) != -1)
 			cpu_set(phys_id, physid_mask);
 	}

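The cpus_and()/cpus_equal() to cpumask_and()/cpumask_equal() conversion in the last hunk keeps the same online-check logic with pointer arguments. A hedged restatement (the helper is hypothetical):

#include <linux/cpumask.h>

static bool mask_all_online(const struct cpumask *mask)
{
        cpumask_t tmp;  /* on-stack copy, fine while NR_CPUS is small */

        /* old: cpus_and(tmp, *mask, cpu_online_map); */
        cpumask_and(&tmp, mask, cpu_online_mask);
        return cpumask_equal(mask, &tmp);
}

Note that cpumask_subset(mask, cpu_online_mask) expresses the same test without the temporary; the hunk above keeps the literal structure of the original code.
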
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
 		physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 
 	show_mp_info(nr_cpu);

@@ -88,7 +88,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
 	.irq		= AU1000_RTC_MATCH2_INT,
 	.set_next_event	= au1x_rtcmatch2_set_next_event,
 	.set_mode	= au1x_rtcmatch2_set_mode,
-	.cpumask	= CPU_MASK_ALL_PTR,
+	.cpumask	= cpu_all_mask,
 };
 
 static struct irqaction au1x_rtcmatch2_irqaction = {

@@ -24,12 +24,10 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
 
 #define cpu_to_node(cpu)	(sn_cpu_info[(cpu)].p_nodeid)
 #define parent_node(node)	(node)
-#define node_to_cpumask(node)	(hub_data(node)->h_cpus)
 #define cpumask_of_node(node)	(&hub_data(node)->h_cpus)
 struct pci_bus;
 extern int pcibus_to_node(struct pci_bus *);
 
-#define pcibus_to_cpumask(bus)	(cpu_online_map)
 #define cpumask_of_pcibus(bus)	(cpu_online_mask)
 
 extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];

@@ -178,8 +178,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 * Mark current->active_mm as not "active" anymore.
 	 * We don't want to mislead possible IPI tlb flush routines.
 	 */
-	cpu_clear(cpu, prev->cpu_vm_mask);
-	cpu_set(cpu, next->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 
 	local_irq_restore(flags);
 }

@@ -235,8 +235,8 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 
 	/* mark mmu ownership change */
-	cpu_clear(cpu, prev->cpu_vm_mask);
-	cpu_set(cpu, next->cpu_vm_mask);
+	cpumask_clear_cpu(cpu, mm_cpumask(prev));
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 
 	local_irq_restore(flags);
 }

@@ -258,7 +258,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 
 	local_irq_save(flags);
 
-	if (cpu_isset(cpu, mm->cpu_vm_mask)) {
+	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		get_new_mmu_context(mm, cpu);
 #ifdef CONFIG_MIPS_MT_SMTC
 		/* See comments for similar code above */

@@ -19,7 +19,7 @@ struct task_struct;
 
 struct plat_smp_ops {
 	void (*send_ipi_single)(int cpu, unsigned int action);
-	void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
+	void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
 	void (*init_secondary)(void);
 	void (*smp_finish)(void);
 	void (*cpus_done)(void);

@@ -78,6 +78,6 @@ extern void play_dead(void);
 extern asmlinkage void smp_call_function_interrupt(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif /* __ASM_SMP_H */

@@ -80,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
 	local_irq_restore(flags);
 }
 
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		cmp_send_ipi_single(i, action);
 }

@@ -171,7 +171,7 @@ void __init cmp_smp_setup(void)
 	for (i = 1; i < NR_CPUS; i++) {
 		if (amon_cpu_avail(i)) {
-			cpu_set(i, cpu_possible_map);
+			set_cpu_possible(i, true);
 			__cpu_number_map[i]	= ++ncpu;
 			__cpu_logical_map[ncpu]	= i;
 		}

@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 		write_vpe_c0_vpeconf0(tmp);
 
 		/* Record this as available CPU */
-		cpu_set(tc, cpu_possible_map);
+		set_cpu_possible(tc, true);
 		__cpu_number_map[tc]	= ++ncpu;
 		__cpu_logical_map[ncpu]	= tc;
 	}

@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
 	local_irq_restore(flags);
 }
 
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		vsmp_send_ipi_single(i, action);
 }

@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
 	panic(KERN_ERR "%s called", __func__);
 }
 
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+				    unsigned int action)
 {
 	panic(KERN_ERR "%s called", __func__);
 }

@@ -128,7 +128,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 }

@@ -183,15 +183,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	mp_ops->prepare_cpus(max_cpus);
 	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
-	cpu_present_map = cpu_possible_map;
+	init_cpu_present(&cpu_possible_map);
 #endif
 }
 
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
-	cpu_set(0, cpu_possible_map);
-	cpu_set(0, cpu_online_map);
+	set_cpu_possible(0, true);
+	set_cpu_online(0, true);
 	cpu_set(0, cpu_callin_map);
 }

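The second recurring pattern, seen above and in the powerpc/um hunks below, is replacing direct writes to cpu_possible_map/cpu_online_map with the set_cpu_*() accessors. A hedged sketch of the accessor style (the probe loop is hypothetical):

#include <linux/init.h>
#include <linux/cpumask.h>

static void __init register_cpus(int ncpus)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                /* old: cpu_set(i, cpu_possible_map); wrote the global   */
                /* bitmap directly; the accessor keeps the map read-only */
                /* everywhere except arch bring-up code.                 */
                set_cpu_possible(i, true);
        }
        set_cpu_online(0, true);        /* boot CPU is already running */
}
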
@@ -305,7 +305,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
 	 */
 	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 	for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 		__cpu_number_map[i] = i;
 		__cpu_logical_map[i] = i;
 	}

@@ -525,8 +525,8 @@ void smtc_prepare_cpus(int cpus)
 	 * Pull any physically present but unused TCs out of circulation.
 	 */
 	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
-		cpu_clear(tc, cpu_possible_map);
-		cpu_clear(tc, cpu_present_map);
+		set_cpu_possible(tc, false);
+		set_cpu_present(tc, false);
 		tc++;
 	}

@@ -43,11 +43,12 @@ static void ssmtc_send_ipi_single(int cpu, unsigned int action)
 	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
 }
 
-static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
+				       unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		ssmtc_send_ipi_single(i, action);
 }

@@ -79,7 +79,7 @@ static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
 	 * cores it has been used on
 	 */
 	if (vma)
-		mask = vma->vm_mm->cpu_vm_mask;
+		mask = *mm_cpumask(vma->vm_mm);
 	else
 		mask = cpu_online_map;
 	cpu_clear(cpu, mask);

@@ -21,11 +21,11 @@ static void msmtc_send_ipi_single(int cpu, unsigned int action)
 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 }
 
-static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		msmtc_send_ipi_single(i, action);
 }

@@ -97,11 +97,11 @@ static void yos_send_ipi_single(int cpu, unsigned int action)
 	}
 }
 
-static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		yos_send_ipi_single(i, action);
 }

@@ -421,7 +421,7 @@ static void __init node_mem_init(cnodeid_t node)
 
 /*
  * A node with nothing. We use it to avoid any special casing in
- * node_to_cpumask
+ * cpumask_of_node
 */
 static struct node_data null_node = {
 	.hub = {

@@ -165,11 +165,11 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
 	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
 }
 
-static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		ip27_send_ipi_single(i, action);
 }

@@ -82,11 +82,12 @@ static void bcm1480_send_ipi_single(int cpu, unsigned int action)
 	__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
 }
 
-static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void bcm1480_send_ipi_mask(const struct cpumask *mask,
+				  unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		bcm1480_send_ipi_single(i, action);
 }

@@ -70,11 +70,12 @@ static void sb1250_send_ipi_single(int cpu, unsigned int action)
 	__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
 }
 
-static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
+					unsigned int action)
 {
 	unsigned int i;
 
-	for_each_cpu_mask(i, mask)
+	for_each_cpu(i, mask)
 		sb1250_send_ipi_single(i, action);
 }

@@ -38,13 +38,13 @@ extern unsigned long mmu_context_cache[NR_CPUS];
 #define enter_lazy_tlb(mm, tsk)	do {} while (0)
 
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, task) \
-	cpu_set((cpu), (task)->cpu_vm_mask)
-#define cpu_maybe_ran_vm(cpu, task) \
-	cpu_test_and_set((cpu), (task)->cpu_vm_mask)
+#define cpu_ran_vm(cpu, mm) \
+	cpumask_set_cpu((cpu), mm_cpumask(mm))
+#define cpu_maybe_ran_vm(cpu, mm) \
+	cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
 #else
-#define cpu_ran_vm(cpu, task)		do {} while (0)
-#define cpu_maybe_ran_vm(cpu, task)	true
+#define cpu_ran_vm(cpu, mm)		do {} while (0)
+#define cpu_maybe_ran_vm(cpu, mm)	true
 #endif /* CONFIG_SMP */
 
 /*

@@ -30,7 +30,6 @@ extern void smp_send_all_nop(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #endif /* !ASSEMBLY */

@@ -146,7 +146,7 @@ extern void smp_generic_take_timebase(void);
 extern struct smp_ops_t *smp_ops;
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 /* Definitions relative to the secondary CPU spin loop
  * and entry point. Not all of them exist on both 32 and

@@ -17,11 +17,6 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node)	(node)
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return numa_cpumask_lookup_table[node];
-}
-
 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
 int of_node_to_nid(struct device_node *device);

@@ -36,11 +31,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
 }
 #endif
 
-#define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ? \
-					CPU_MASK_ALL : \
-					node_to_cpumask(pcibus_to_node(bus)) \
-				)
-
 #define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ? \
 				 cpu_all_mask : \
 				 cpumask_of_node(pcibus_to_node(bus)))

@@ -104,8 +94,6 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_siblings(cpu)	(per_cpu(cpu_sibling_map, cpu))
-#define topology_core_siblings(cpu)	(per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))

@@ -431,9 +431,9 @@ void __init smp_setup_cpu_maps(void)
 		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
 			DBG("    thread %d -> cpu %d (hard id %d)\n",
 			    j, cpu, intserv[j]);
-			cpu_set(cpu, cpu_present_map);
+			set_cpu_present(cpu, true);
 			set_hard_smp_processor_id(cpu, intserv[j]);
-			cpu_set(cpu, cpu_possible_map);
+			set_cpu_possible(cpu, true);
 			cpu++;
 		}
 	}

@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
 			maxcpus);
 
 		for (cpu = 0; cpu < maxcpus; cpu++)
-			cpu_set(cpu, cpu_possible_map);
+			set_cpu_possible(cpu, true);
 	out:
 		of_node_put(dn);
 	}

@@ -189,11 +189,11 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	unsigned int cpu;
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, mask)
 		smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
 }

@@ -287,7 +287,7 @@ void __devinit smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
 
-	cpu_set(boot_cpuid, cpu_online_map);
+	set_cpu_online(boot_cpuid, true);
 	cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
 	cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
 #ifdef CONFIG_PPC64

@@ -307,7 +307,7 @@ int generic_cpu_disable(void)
 	if (cpu == boot_cpuid)
 		return -EBUSY;
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 #ifdef CONFIG_PPC64
 	vdso_data->processorCount--;
 	fixup_irqs(cpu_online_map);

@@ -361,7 +361,7 @@ void generic_mach_cpu_die(void)
 	smp_wmb();
 	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	local_irq_enable();
 }
 #endif

@@ -508,7 +508,7 @@ int __devinit start_secondary(void *unused)
 
 	ipi_call_lock();
 	notify_cpu_starting(cpu);
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 	/* Update sibling maps */
 	base = cpu_first_thread_in_core(cpu);
 	for (i = 0; i < threads_per_core; i++) {

@@ -320,7 +320,7 @@ static int __init smp_psurge_probe(void)
 	if (ncpus > NR_CPUS)
 		ncpus = NR_CPUS;
 	for (i = 1; i < ncpus ; ++i)
-		cpu_set(i, cpu_present_map);
+		set_cpu_present(i, true);
 
 	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);

@@ -867,7 +867,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
 
 int smp_core99_cpu_disable(void)
 {
-	cpu_clear(smp_processor_id(), cpu_online_map);
+	set_cpu_online(smp_processor_id(), false);
 
 	/* XXX reset cpu affinity here */
 	mpic_cpu_set_priority(0xf);

@@ -952,7 +952,7 @@ void __init pmac_setup_smp(void)
 		int cpu;
 
 		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
-			cpu_set(cpu, cpu_possible_map);
+			set_cpu_possible(cpu, true);
 		smp_ops = &psurge_smp_ops;
 	}
 #endif /* CONFIG_PPC32 */

@@ -94,7 +94,7 @@ static int pseries_cpu_disable(void)
 {
 	int cpu = smp_processor_id();
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 	vdso_data->processorCount--;
 
 	/*fix boot_cpuid here*/

@@ -185,7 +185,7 @@ static int pseries_add_processor(struct device_node *np)
 
 	for_each_cpu_mask(cpu, tmp) {
 		BUG_ON(cpu_isset(cpu, cpu_present_map));
-		cpu_set(cpu, cpu_present_map);
+		set_cpu_present(cpu, true);
 		set_hard_smp_processor_id(cpu, *intserv++);
 	}
 	err = 0;

@@ -217,7 +217,7 @@ static void pseries_remove_processor(struct device_node *np)
 		if (get_hard_smp_processor_id(cpu) != intserv[i])
 			continue;
 		BUG_ON(cpu_online(cpu));
-		cpu_clear(cpu, cpu_present_map);
+		set_cpu_present(cpu, false);
 		set_hard_smp_processor_id(cpu, -1);
 		break;
 	}

@@ -62,7 +62,7 @@ extern struct mutex smp_cpu_state_mutex;
 extern int smp_cpu_polarization[];
 
 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #endif

@@ -9,7 +9,6 @@ const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
 
 extern cpumask_t cpu_core_map[NR_CPUS];
 
-#define topology_core_siblings(cpu)	(cpu_core_map[cpu])
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 
 int topology_set_cpu_management(int fc);

@@ -147,11 +147,11 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
 		udelay(10);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	int cpu;
 
-	for_each_cpu_mask(cpu, mask)
+	for_each_cpu(cpu, mask)
 		smp_ext_bitcall(cpu, ec_call_function);
 }

@@ -44,7 +44,6 @@ void plat_send_ipi(unsigned int cpu, unsigned int message);
 
 void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 #else

@@ -31,7 +31,6 @@
 #define cpu_to_node(cpu)	((void)(cpu),0)
 #define parent_node(node)	((void)(node),0)
 
-#define node_to_cpumask(node)	((void)node, cpu_online_map)
 #define cpumask_of_node(node)	((void)node, cpu_online_mask)
 
 #define pcibus_to_node(bus)	((void)(bus), -1)

@@ -36,7 +36,6 @@ extern int sparc64_multi_core;
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 
 /*
  * General functions that each host system must provide.

@@ -12,22 +12,8 @@ static inline int cpu_to_node(int cpu)
 
 #define parent_node(node)	(node)
 
-static inline cpumask_t node_to_cpumask(int node)
-{
-	return numa_cpumask_lookup_table[node];
-}
 #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
 
-/*
- * Returns a pointer to the cpumask of CPUs on Node 'node'.
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#define node_to_cpumask_ptr(v, node)		\
-		cpumask_t *v = &(numa_cpumask_lookup_table[node])
-
-#define node_to_cpumask_ptr_next(v, node)	\
-		v = &(numa_cpumask_lookup_table[node])
-
 struct pci_bus;
 #ifdef CONFIG_PCI
 extern int pcibus_to_node(struct pci_bus *pbus);

@@ -71,8 +57,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
-#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
-#define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #define mc_capable()				(sparc64_multi_core)

@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if(prev != next){
-		cpu_clear(cpu, prev->cpu_vm_mask);
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 		if(next != &init_mm)
 			__switch_mm(&next->context.id);
 	}

@@ -111,7 +111,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
 	int i;
 
 	for (i = 0; i < ncpus; ++i)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 
 	cpu_clear(me, cpu_online_map);
 	cpu_set(me, cpu_online_map);

@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);

@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.

@@ -121,7 +121,6 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 	smp_ops.send_call_func_single_ipi(cpu);
 }
 
-#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
 static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
 	smp_ops.send_call_func_ipi(mask);

@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 							  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}

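The hunks in this part of the merge collapse the allocate-then-clear pattern into zalloc_cpumask_var()/zalloc_cpumask_var_node(), which return the mask already zeroed. A hedged sketch (the wrapper function is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int make_empty_mask(cpumask_var_t *mask)
{
        /*
         * old: alloc_cpumask_var(mask, GFP_KERNEL) followed by
         *      cpumask_clear(*mask).
         * zalloc_cpumask_var() does both, and stays correct when
         * CONFIG_CPUMASK_OFFSTACK=n and the "allocation" is really an
         * embedded fixed-size bitmap rather than kmalloc'd memory.
         */
        if (!zalloc_cpumask_var(mask, GFP_KERNEL))
                return -ENOMEM;
        return 0;
}
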
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
 		preempt_disable();
 		load_LDT(pc);
-		if (!cpus_equal(current->mm->cpu_vm_mask,
-				cpumask_of_cpu(smp_processor_id())))
+		if (!cpumask_equal(mm_cpumask(current->mm),
+				   cpumask_of(smp_processor_id())))
 			smp_call_function(flush_ldt, current->mm, 1);
 		preempt_enable();
 #else

@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)

@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);

@@ -93,7 +93,6 @@ static struct irqaction irq0  = {
 
 void __init setup_default_timer_irq(void)
 {
-	irq0.mask = cpumask_of_cpu(0);
 	setup_irq(0, &irq0);
 }

@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);

@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 
 	local_flush_tlb();
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }

@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }

@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 			leave_mm(smp_processor_id());
 	}
 
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, va);
 
 	preempt_enable();
 }

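The cpumask_any_but() test repeated in these TLB hunks asks whether any CPU other than the caller holds the mm. A hedged restatement of the idiom (the helper name is hypothetical):

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/mm_types.h>

static bool others_need_flush(struct mm_struct *mm)
{
        /* cpumask_any_but() returns an arbitrary set CPU excluding the */
        /* given one, or a value >= nr_cpu_ids if no such CPU exists,   */
        /* so this reads "does any other CPU currently hold this mm?". */
        return cpumask_any_but(mm_cpumask(mm), smp_processor_id())
                < nr_cpu_ids;
}
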
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
 		for_each_online_cpu(cpu) {
-			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
 				continue;
 			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
 		}
 		return;
 	}
-	cpumask_copy(mask, &mm->cpu_vm_mask);
+	cpumask_copy(mask, mm_cpumask(mm));
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed

@@ -193,7 +193,7 @@ acpi_status __init acpi_os_initialize(void)
 
 static void bind_to_cpu0(struct work_struct *work)
 {
-	set_cpus_allowed(current, cpumask_of_cpu(0));
+	set_cpus_allowed_ptr(current, cpumask_of(0));
 	kfree(work);
 }

@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
 	struct acpi_processor *match_pr;
 	struct acpi_psd_package *match_pdomain;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	mutex_lock(&performance_mutex);

@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
 	 * Now that we have _PSD data from all CPUs, lets setup P-state
 	 * domain info.
 	 */
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)

@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	/*

@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)

@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
 	int count;
 	int cpu;
 
-	if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
 		return 1;
 	}
 
-	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
 		if (!cpumask_test_cpu(cpu, core_mask)) {

@@ -154,9 +154,8 @@ int sync_start(void)
 {
 	int err;
 
-	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_clear(marked_cpus);
 
 	start_cpu_work();

@@ -37,9 +37,6 @@
 #ifndef parent_node
 #define parent_node(node)	((void)(node),0)
 #endif
-#ifndef node_to_cpumask
-#define node_to_cpumask(node)	((void)node, cpu_online_map)
-#endif
 #ifndef cpumask_of_node
 #define cpumask_of_node(node)	((void)node, cpu_online_mask)
 #endif

@@ -55,18 +52,4 @@
 
 #endif	/* CONFIG_NUMA */
 
-/*
- * returns pointer to cpumask for specified node
- * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
- */
-#ifndef node_to_cpumask_ptr
-
-#define	node_to_cpumask_ptr(v, node) 					\
-		cpumask_t _##v = node_to_cpumask(node);			\
-		const cpumask_t *v = &_##v
-
-#define	node_to_cpumask_ptr_next(v, node)				\
-			 _##v = node_to_cpumask(node)
-#endif
-
 #endif /* _ASM_GENERIC_TOPOLOGY_H */

@ -3,444 +3,37 @@
|
|||
|
||||
/*
|
||||
* Cpumasks provide a bitmap suitable for representing the
|
||||
* set of CPU's in a system, one bit position per CPU number.
|
||||
*
|
||||
* The new cpumask_ ops take a "struct cpumask *"; the old ones
|
||||
* use cpumask_t.
|
||||
*
|
||||
* See detailed comments in the file linux/bitmap.h describing the
|
||||
* data type on which these cpumasks are based.
|
||||
*
|
||||
* For details of cpumask_scnprintf() and cpumask_parse_user(),
|
||||
* see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c.
|
||||
* For details of cpulist_scnprintf() and cpulist_parse(), see
|
||||
* bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
|
||||
* For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
|
||||
* For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
|
||||
* For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
|
||||
* For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
|
||||
*
|
||||
* . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
|
||||
* Note: The alternate operations with the suffix "_nr" are used
|
||||
* to limit the range of the loop to nr_cpu_ids instead of
|
||||
* NR_CPUS when NR_CPUS > 64 for performance reasons.
|
||||
* If NR_CPUS is <= 64 then most assembler bitmask
|
||||
* operators execute faster with a constant range, so
|
||||
* the operator will continue to use NR_CPUS.
|
||||
*
|
||||
* Another consideration is that nr_cpu_ids is initialized
|
||||
* to NR_CPUS and isn't lowered until the possible cpus are
|
||||
* discovered (including any disabled cpus). So early uses
|
||||
* will span the entire range of NR_CPUS.
|
||||
* . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
|
||||
*
|
||||
* The obsolescent cpumask operations are:
|
||||
*
|
||||
* void cpu_set(cpu, mask) turn on bit 'cpu' in mask
|
||||
 * void cpu_clear(cpu, mask)		turn off bit 'cpu' in mask
 * void cpus_setall(mask)		set all bits
 * void cpus_clear(mask)		clear all bits
 * int cpu_isset(cpu, mask)		true iff bit 'cpu' set in mask
 * int cpu_test_and_set(cpu, mask)	test and set bit 'cpu' in mask
 *
 * int cpus_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void cpus_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void cpus_xor(dst, src1, src2)	dst = src1 ^ src2
 * int cpus_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void cpus_complement(dst, src)	dst = ~src
 *
 * int cpus_equal(mask1, mask2)		Does mask1 == mask2?
 * int cpus_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int cpus_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int cpus_empty(mask)			Is mask empty (no bits set)?
 * int cpus_full(mask)			Is mask full (all bits set)?
 * int cpus_weight(mask)		Hamming weight - number of set bits
 * int cpus_weight_nr(mask)		Same using nr_cpu_ids instead of NR_CPUS
 *
 * void cpus_shift_right(dst, src, n)	Shift right
 * void cpus_shift_left(dst, src, n)	Shift left
 *
 * int first_cpu(mask)			Number lowest set bit, or NR_CPUS
 * int next_cpu(cpu, mask)		Next cpu past 'cpu', or NR_CPUS
 * int next_cpu_nr(cpu, mask)		Next cpu past 'cpu', or nr_cpu_ids
 *
 * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
 *	(can be used as an lvalue)
 * CPU_MASK_ALL				Initializer - all bits set
 * CPU_MASK_NONE			Initializer - no bits set
 * unsigned long *cpus_addr(mask)	Array of unsigned long's in mask
 *
 * CPUMASK_ALLOC kmalloc's a structure that is a composite of many cpumask_t
 * variables, and CPUMASK_PTR provides pointers to each field.
 *
 * The structure should be defined something like this:
 * struct my_cpumasks {
 *	cpumask_t mask1;
 *	cpumask_t mask2;
 * };
 *
 * Usage is then:
 *	CPUMASK_ALLOC(my_cpumasks);
 *	CPUMASK_PTR(mask1, my_cpumasks);
 *	CPUMASK_PTR(mask2, my_cpumasks);
 *
 *	--- DO NOT reference cpumask_t pointers until this check ---
 *	if (my_cpumasks == NULL)
 *		"kmalloc failed"...
 *
 * References are now pointers to the cpumask_t variables (*mask1, ...)
 *
 *if NR_CPUS > BITS_PER_LONG
 *   CPUMASK_ALLOC(m)			Declares and allocates struct m *m =
 *						kmalloc(sizeof(*m), GFP_KERNEL)
 *   CPUMASK_FREE(m)			Macro for kfree(m)
 *else
 *   CPUMASK_ALLOC(m)			Declares struct m _m, *m = &_m
 *   CPUMASK_FREE(m)			Nop
 *endif
 *   CPUMASK_PTR(v, m)			Declares cpumask_t *v = &(m->v)
 * ------------------------------------------------------------------------
 *
 * int cpumask_scnprintf(buf, len, mask)	Format cpumask for printing
 * int cpumask_parse_user(ubuf, ulen, mask)	Parse ascii string as cpumask
 * int cpulist_scnprintf(buf, len, mask)	Format cpumask as list for printing
 * int cpulist_parse(buf, map)			Parse ascii string as cpulist
 * int cpu_remap(oldbit, old, new)		newbit = map(old, new)(oldbit)
 * void cpus_remap(dst, src, old, new)		*dst = map(old, new)(src)
 * void cpus_onto(dst, orig, relmap)		*dst = orig relative to relmap
 * void cpus_fold(dst, orig, sz)		dst bits = orig bits mod sz
 *
 * for_each_cpu_mask(cpu, mask)		for-loop cpu over mask using NR_CPUS
 * for_each_cpu_mask_nr(cpu, mask)	for-loop cpu over mask using nr_cpu_ids
 *
 * int num_online_cpus()		Number of online CPUs
 * int num_possible_cpus()		Number of all possible CPUs
 * int num_present_cpus()		Number of present CPUs
 *
 * int cpu_online(cpu)			Is some cpu online?
 * int cpu_possible(cpu)		Is some cpu possible?
 * int cpu_present(cpu)			Is some cpu present (can schedule)?
 *
 * int any_online_cpu(mask)		First online cpu in mask
 *
 * for_each_possible_cpu(cpu)		for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)		for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)		for-loop cpu over cpu_present_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  Note for example the additional
 *    40 lines of assembly code compiling the "for each possible cpu"
 *    loops buried in the disk_stat_read() macro calls when compiling
 *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
 *    one-line #define for cpu_isset(), instead of wrapping an inline
 *    inside a macro, the way we do the other calls.
 * set of CPU's in a system, one bit position per CPU number.  In general,
 * only nr_cpu_ids (<= NR_CPUS) bits are valid.
 */
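For illustration only — a minimal sketch, not part of this header, of how the deprecated operators documented above compose (the mask names and the printk message are hypothetical):

        cpumask_t a = CPU_MASK_NONE, b = CPU_MASK_ALL, both;
        int cpu;

        cpu_set(2, a);                  /* turn on bit 2 in a */
        cpus_and(both, a, b);           /* both = a & b */
        if (!cpus_empty(both))          /* any bits set at all? */
                for_each_cpu_mask(cpu, both)
                        printk(KERN_DEBUG "cpu %d is set\n", cpu);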

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern cpumask_t _unused_cpumask_arg_;

#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
        set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
        clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
        bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
        bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
        return test_and_set_bit(cpu, addr->bits);
}

#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
                                __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
                                        const cpumask_t *srcp, int nbits)
{
        bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
        return bitmap_empty(srcp->bits, nbits);
}

#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
        return bitmap_full(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
        return bitmap_weight(srcp->bits, nbits);
}

#define cpus_shift_right(dst, src, n) \
                        __cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define cpus_shift_left(dst, src, n) \
                        __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */

/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 * cpumask_bits - get the bits in a cpumask
 * @maskp: the struct cpumask *
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 * You should only assume nr_cpu_ids bits of this mask are valid.  This is
 * a macro so it's const-correct.
 */
#define to_cpumask(bitmap)                                              \
        ((struct cpumask *)(1 ? (bitmap)                                \
                            : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
        return 1;
}
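A small usage sketch (the bitmap name is hypothetical): since to_cpumask() is valid as a constant initializer, a static NR_CPUS bitmap can be wrapped once at compile time:

        static DECLARE_BITMAP(frozen_cpu_bits, NR_CPUS);       /* hypothetical */
        static struct cpumask *const frozen_cpus = to_cpumask(frozen_cpu_bits);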

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
        cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}
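Worked example, assuming BITS_PER_LONG == 64 and cpu == 70: cpu % 64 == 6, so p starts at row cpu_bit_bitmap[7], whose word 0 has bit 6 set; p -= 70 / 64 backs up exactly one long, so that word lands at index 1 of the returned mask — i.e. bit 64 + 6 == 70, the bit requested.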

#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
/*
 * In cases where we take the address of the cpumask immediately,
 * gcc optimizes it out (it's a constant) and there's no huge stack
 * variable created:
 */
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))

#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

#define CPU_MASK_ALL_PTR        (&CPU_MASK_ALL)

#else

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

/* cpu_mask_all is in init/main.c */
extern cpumask_t cpu_mask_all;
#define CPU_MASK_ALL_PTR        (&cpu_mask_all)

#endif
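To make the initializer concrete, assume NR_CPUS == 128 and BITS_PER_LONG == 64: then BITS_TO_LONGS(128) == 2 and CPU_MASK_LAST_WORD == ~0UL (128 is a multiple of 64), so CPU_MASK_ALL expands to (cpumask_t){ { ~0UL, ~0UL } } — two all-ones words covering bits 0..127.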

#define CPU_MASK_NONE                                                   \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                          \
} }

#define CPU_MASK_CPU0                                                   \
(cpumask_t) { {                                                         \
        [0] = 1UL                                                       \
} }

#define cpus_addr(src) ((src).bits)

#if NR_CPUS > BITS_PER_LONG
#define CPUMASK_ALLOC(m)        struct m *m = kmalloc(sizeof(*m), GFP_KERNEL)
#define CPUMASK_FREE(m)         kfree(m)
#else
#define CPUMASK_ALLOC(m)        struct m _m, *m = &_m
#define CPUMASK_FREE(m)
#endif
#define CPUMASK_PTR(v, m)       cpumask_t *v = &(m->v)
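Putting the pieces together — a sketch of the allocation pattern documented at the top of this header (struct and field names are hypothetical):

        struct my_cpumasks {
                cpumask_t mask1;
                cpumask_t mask2;
        };

        CPUMASK_ALLOC(my_cpumasks);     /* kmalloc'd iff NR_CPUS > BITS_PER_LONG */
        CPUMASK_PTR(mask1, my_cpumasks);
        CPUMASK_PTR(mask2, my_cpumasks);

        if (my_cpumasks == NULL)
                return -ENOMEM;         /* the "kmalloc failed" case */

        cpus_clear(*mask1);             /* always use via the pointers */
        cpus_setall(*mask2);
        CPUMASK_FREE(my_cpumasks);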

#define cpu_remap(oldbit, old, new) \
                __cpu_remap((oldbit), &(old), &(new), NR_CPUS)
static inline int __cpu_remap(int oldbit,
                const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
        return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define cpus_remap(dst, src, old, new) \
                __cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
                const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
        bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define cpus_onto(dst, orig, relmap) \
                __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
                const cpumask_t *relmapp, int nbits)
{
        bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define cpus_fold(dst, orig, sz) \
                __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
                int sz, int nbits)
{
        bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#define cpumask_bits(maskp) ((maskp)->bits)

#if NR_CPUS == 1

#define nr_cpu_ids              1
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define first_cpu(src)          ({ (void)(src); 0; })
#define next_cpu(n, src)        ({ (void)(src); 1; })
#define any_online_cpu(mask)    0
#define for_each_cpu_mask(cpu, mask)    \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#else /* NR_CPUS > 1 */

#else
extern int nr_cpu_ids;
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);

#define first_cpu(src)          __first_cpu(&(src))
#define next_cpu(n, src)        __next_cpu((n), &(src))
#define any_online_cpu(mask)    __any_online_cpu(&(mask))
#define for_each_cpu_mask(cpu, mask)            \
        for ((cpu) = -1;                        \
                (cpu) = next_cpu((cpu), (mask)),\
                (cpu) < NR_CPUS; )
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#endif
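Note the idiom in the NR_CPUS > 1 for_each_cpu_mask(): cpu starts at -1, and the comma expression advances it with next_cpu() before each termination test, so the loop body runs once per set bit and iteration stops when next_cpu() returns NR_CPUS.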

#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#if NR_CPUS <= 64

#define next_cpu_nr(n, src)             next_cpu(n, src)
#define cpus_weight_nr(cpumask)         cpus_weight(cpumask)
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)

#else /* NR_CPUS > 64 */

int __next_cpu_nr(int n, const cpumask_t *srcp);
#define next_cpu_nr(n, src)     __next_cpu_nr((n), &(src))
#define cpus_weight_nr(cpumask) __cpus_weight(&(cpumask), nr_cpu_ids)
#define for_each_cpu_mask_nr(cpu, mask)                 \
        for ((cpu) = -1;                                \
                (cpu) = next_cpu_nr((cpu), (mask)),     \
                (cpu) < nr_cpu_ids; )

#endif /* NR_CPUS > 64 */
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits nr_cpu_ids
#else
#define nr_cpumask_bits NR_CPUS
#endif

/*
 * The following particular system cpumasks and operations manage

@ -487,12 +80,6 @@ extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;

/* These strip const, as traditionally they weren't const. */
#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
#define cpu_online_map   (*(cpumask_t *)cpu_online_mask)
#define cpu_present_map  (*(cpumask_t *)cpu_present_mask)
#define cpu_active_map   (*(cpumask_t *)cpu_active_mask)

#if NR_CPUS > 1
#define num_online_cpus()       cpumask_weight(cpu_online_mask)
#define num_possible_cpus()     cpumask_weight(cpu_possible_mask)

@ -511,35 +98,6 @@ extern const struct cpumask *const cpu_active_mask;
#define cpu_active(cpu)         ((cpu) == 0)
#endif

#define cpu_is_offline(cpu)     unlikely(!cpu_online(cpu))

/* These are the new versions of the cpumask operators: passed by pointer.
 * The older versions will be implemented in terms of these, then deleted. */
#define cpumask_bits(maskp) ((maskp)->bits)

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL                                            \
{                                                               \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL                                            \
{                                                               \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
}
#endif /* NR_CPUS > BITS_PER_LONG */

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
#define nr_cpumask_bits nr_cpu_ids
#else
#define nr_cpumask_bits NR_CPUS
#endif

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{

@ -1100,4 +658,241 @@ void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);

/**
 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
 * @bitmap: the bitmap
 *
 * There are a few places where cpumask_var_t isn't appropriate and
 * static cpumasks must be used (eg. very early boot), yet we don't
 * expose the definition of 'struct cpumask'.
 *
 * This does the conversion, and can be used as a constant initializer.
 */
#define to_cpumask(bitmap)                                              \
        ((struct cpumask *)(1 ? (bitmap)                                \
                            : (void *)sizeof(__check_is_bitmap(bitmap))))

static inline int __check_is_bitmap(const unsigned long *bitmap)
{
        return 1;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
        cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}

#define cpu_is_offline(cpu)     unlikely(!cpu_online(cpu))

#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL                                            \
{                                                               \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
}

#else /* NR_CPUS > BITS_PER_LONG */

#define CPU_BITS_ALL                                            \
{                                                               \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD         \
}
#endif /* NR_CPUS > BITS_PER_LONG */

/*
 *
 * From here down, all obsolete.  Use cpumask_ variants!
 *
 */
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
/* These strip const, as traditionally they weren't const. */
#define cpu_possible_map (*(cpumask_t *)cpu_possible_mask)
#define cpu_online_map   (*(cpumask_t *)cpu_online_mask)
#define cpu_present_map  (*(cpumask_t *)cpu_present_mask)
#define cpu_active_map   (*(cpumask_t *)cpu_active_mask)

#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))

#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

#else

#define CPU_MASK_ALL                                                    \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,                        \
        [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD                 \
} }

#endif

#define CPU_MASK_NONE                                                   \
(cpumask_t) { {                                                         \
        [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL                          \
} }

#define CPU_MASK_CPU0                                                   \
(cpumask_t) { {                                                         \
        [0] = 1UL                                                       \
} }

#if NR_CPUS == 1
#define first_cpu(src)          ({ (void)(src); 0; })
#define next_cpu(n, src)        ({ (void)(src); 1; })
#define any_online_cpu(mask)    0
#define for_each_cpu_mask(cpu, mask)    \
        for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);

#define first_cpu(src)          __first_cpu(&(src))
#define next_cpu(n, src)        __next_cpu((n), &(src))
#define any_online_cpu(mask)    __any_online_cpu(&(mask))
#define for_each_cpu_mask(cpu, mask)            \
        for ((cpu) = -1;                        \
                (cpu) = next_cpu((cpu), (mask)),\
                (cpu) < NR_CPUS; )
#endif /* SMP */

#if NR_CPUS <= 64

#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)

#else /* NR_CPUS > 64 */

int __next_cpu_nr(int n, const cpumask_t *srcp);
#define for_each_cpu_mask_nr(cpu, mask)                 \
        for ((cpu) = -1;                                \
                (cpu) = __next_cpu_nr((cpu), &(mask)),  \
                (cpu) < nr_cpu_ids; )

#endif /* NR_CPUS > 64 */

#define cpus_addr(src) ((src).bits)

#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
        set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
        clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
        bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
        bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
        return test_and_set_bit(cpu, addr->bits);
}

#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
                                __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
                                        const cpumask_t *src2p, int nbits)
{
        return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
        return bitmap_empty(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
        return bitmap_weight(srcp->bits, nbits);
}

#define cpus_shift_left(dst, src, n) \
                        __cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
                                        const cpumask_t *srcp, int n, int nbits)
{
        bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */

#endif /* __LINUX_CPUMASK_H */

@ -84,7 +84,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
 * struct irqaction - per interrupt action descriptor
 * @handler:    interrupt handler function
 * @flags:      flags (see IRQF_* above)
 * @mask:       no comment as it is useless and about to be removed
 * @name:       name of the device
 * @dev_id:     cookie to identify the device
 * @next:       pointer to the next irqaction for shared interrupts

@ -97,7 +96,6 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
struct irqaction {
        irq_handler_t handler;
        unsigned long flags;
        cpumask_t mask;
        const char *name;
        void *dev_id;
        struct irqaction *next;

@ -1817,10 +1817,13 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
        return 0;
}
#endif

#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
        return set_cpus_allowed_ptr(p, &new_mask);
}
#endif
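A hedged usage sketch of the wrapper just added (p is an assumed struct task_struct *): the pointer-taking call is the preferred form; the by-value wrapper exists only while cpumasks remain on-stack:

        set_cpus_allowed_ptr(p, cpumask_of(2));         /* new: pass a pointer */
        set_cpus_allowed(p, cpumask_of_cpu(2));         /* legacy: pass by value */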

/*
 * Architectures can set this to 1 if they have specified

@ -73,15 +73,6 @@ int smp_call_function(void(*func)(void *info), void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
                            void (*func)(void *info), void *info, bool wait);

/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
static inline int
smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
                       int wait)
{
        smp_call_function_many(&mask, func, info, wait);
        return 0;
}
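A migration sketch for callers (do_work is a hypothetical callback): the deprecated by-value form maps directly onto the pointer-based API:

        smp_call_function_mask(cpu_online_map, do_work, NULL, 1);       /* old */
        smp_call_function_many(cpu_online_mask, do_work, NULL, true);   /* new */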

void __smp_call_function_single(int cpuid, struct call_single_data *data,
                                int wait);

@ -144,8 +135,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus()                      1
#define smp_prepare_boot_cpu()                  do {} while (0)
#define smp_call_function_mask(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
#define smp_call_function_many(mask, func, info, wait) \
                        (up_smp_call_function(func, info))
static inline void init_call_single_data(void)

@ -211,12 +211,6 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_id
#define topology_core_id(cpu)                   ((void)(cpu), 0)
#endif
#ifndef topology_thread_siblings
#define topology_thread_siblings(cpu)           cpumask_of_cpu(cpu)
#endif
#ifndef topology_core_siblings
#define topology_core_siblings(cpu)             cpumask_of_cpu(cpu)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)            cpumask_of(cpu)
#endif

@ -359,11 +359,6 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { }

#else

#if NR_CPUS > BITS_PER_LONG
cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_mask_all);
#endif

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

@ -347,13 +347,6 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
        generic_exec_single(cpu, data, wait);
}

/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */

#ifndef arch_send_call_function_ipi_mask
# define arch_send_call_function_ipi_mask(maskp) \
        arch_send_call_function_ipi(*(maskp))
#endif

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).

@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
        if (current_trace)
                *iter->trace = *current_trace;

        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
        if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
                goto fail;

        cpumask_clear(iter->started);

        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else

@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
        if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                goto out_free_tracing_cpumask;

        /* To save memory, keep the ring buffer size to its minimum */

@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
        cpumask_clear(tracing_reader_cpumask);

        /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(ring_buf_size,

@ -29,7 +29,6 @@ static unsigned long max_pages(unsigned long min_pages)
        int node = numa_node_id();
        struct zone *zones = NODE_DATA(node)->node_zones;
        int num_cpus_on_node;
        const struct cpumask *cpumask_on_node = cpumask_of_node(node);

        node_free_pages =
#ifdef CONFIG_ZONE_DMA

@ -42,7 +41,7 @@ static unsigned long max_pages(unsigned long min_pages)

        max = node_free_pages / FRACTION_OF_NODE_MEM;

        num_cpus_on_node = cpus_weight_nr(*cpumask_on_node);
        num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
        max /= num_cpus_on_node;

        return max(max, min_pages);

@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
        bool called = true;
        struct kvm_vcpu *vcpu;

        if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
                cpumask_clear(cpus);
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);

        spin_lock(&kvm->requests_lock);
        me = smp_processor_id();
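Across the tracing and KVM hunks above the conversion is uniform: zalloc_cpumask_var() returns the mask already zeroed, so each converted call site can drop its explicit cpumask_clear() (and, in the KVM case, the conditional that guarded it).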