diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 64970b9885f..dc69f28489f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -227,17 +227,14 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
 							  GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
-		} else {
-			cpumask_clear(cfg->domain);
-			cpumask_clear(cfg->old_domain);
 		}
 	}
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 847ab416031..5284cd2b577 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -555,10 +555,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 void __init init_c1e_mask(void)
 {
 	/* If we're using c1e_idle, we need to allocate c1e_mask. */
-	if (pm_idle == c1e_idle) {
-		alloc_cpumask_var(&c1e_mask, GFP_KERNEL);
-		cpumask_clear(c1e_mask);
-	}
+	if (pm_idle == c1e_idle)
+		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
 }
 
 static int __init idle_setup(char *str)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 09c5e077dff..565ebc65920 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1059,12 +1059,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 #endif
 	current_thread_info()->cpu = 0;  /* needed? */
 	for_each_possible_cpu(i) {
-		alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
-		cpumask_clear(per_cpu(cpu_core_map, i));
-		cpumask_clear(per_cpu(cpu_sibling_map, i));
-		cpumask_clear(cpu_data(i).llc_shared_map);
+		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 11088cf1031..8ba0ed0b9dd 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -511,7 +511,7 @@ int acpi_processor_preregister_performance(
 	struct acpi_processor *match_pr;
 	struct acpi_psd_package *match_pdomain;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	mutex_lock(&performance_mutex);
@@ -558,7 +558,6 @@ int acpi_processor_preregister_performance(
 	 * Now that we have _PSD data from all CPUs, lets setup P-state
 	 * domain info.
 	 */
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index ce7cf3bc510..4c6c14c1e30 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -77,7 +77,7 @@ static int acpi_processor_update_tsd_coord(void)
 	struct acpi_tsd_package *pdomain, *match_pdomain;
 	struct acpi_processor_throttling *pthrottling, *match_pthrottling;
 
-	if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	/*
@@ -105,7 +105,6 @@ static int acpi_processor_update_tsd_coord(void)
 	if (retval)
 		goto err_ret;
 
-	cpumask_clear(covered_cpus);
 	for_each_possible_cpu(i) {
 		pr = per_cpu(processors, i);
 		if (!pr)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 07a7e4b8f8f..cc4b2f99989 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -884,13 +884,12 @@ static int efx_wanted_rx_queues(void)
 	int count;
 	int cpu;
 
-	if (unlikely(!alloc_cpumask_var(&core_mask, GFP_KERNEL))) {
+	if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) {
 		printk(KERN_WARNING
 		       "sfc: RSS disabled due to allocation failure\n");
 		return 1;
 	}
 
-	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
 		if (!cpumask_test_cpu(cpu, core_mask)) {
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8574622e36a..c9e2ae90f19 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -154,9 +154,8 @@ int sync_start(void)
 {
 	int err;
 
-	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
 		return -ENOMEM;
-	cpumask_clear(marked_cpus);
 
 	start_cpu_work();
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6c0f6a8a22e..411af37f4be 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1984,11 +1984,9 @@ __tracing_open(struct inode *inode, struct file *file)
 	if (current_trace)
 		*iter->trace = *current_trace;
 
-	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 		goto fail;
 
-	cpumask_clear(iter->started);
-
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
@@ -4389,7 +4387,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
-	if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
 		goto out_free_tracing_cpumask;
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -4400,7 +4398,6 @@ __init static int tracer_alloc_buffers(void)
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 	cpumask_copy(tracing_cpumask, cpu_all_mask);
-	cpumask_clear(tracing_reader_cpumask);
 
 	/* TODO: make the number of buffers hot pluggable with CPUS */
 	global_trace.buffer = ring_buffer_alloc(ring_buf_size,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 897bff3b7df..034a798b043 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -738,8 +738,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	bool called = true;
 	struct kvm_vcpu *vcpu;
 
-	if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		cpumask_clear(cpus);
+	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
 	spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
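
Note on the conversion: zalloc_cpumask_var() and zalloc_cpumask_var_node() return a mask that is already zeroed, so the explicit cpumask_clear() that used to follow alloc_cpumask_var()/alloc_cpumask_var_node() can simply be dropped, which is all each hunk above does. Below is a minimal sketch of the caller-side pattern, assuming only <linux/cpumask.h> and <linux/slab.h>; the demo_ctx struct and its helpers are illustrative names, not part of this patch, and the snippet is meant as a sketch rather than buildable stand-alone code.

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Illustrative container that owns an off-stack cpumask. */
struct demo_ctx {
	cpumask_var_t mask;
};

static struct demo_ctx *demo_ctx_alloc(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	/*
	 * Old pattern: alloc_cpumask_var(&ctx->mask, GFP_KERNEL) followed by
	 * cpumask_clear(ctx->mask).  zalloc_cpumask_var() hands back a zeroed
	 * mask whether or not CONFIG_CPUMASK_OFFSTACK is set, so no separate
	 * clear is needed here.
	 */
	if (!zalloc_cpumask_var(&ctx->mask, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

static void demo_ctx_free(struct demo_ctx *ctx)
{
	if (!ctx)
		return;
	free_cpumask_var(ctx->mask);
	kfree(ctx);
}

As in the hunks above, the error path stays the same as with the alloc_cpumask_var() callers: free whatever was already allocated and bail out; only the follow-up cpumask_clear() disappears.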