[PATCH 1/4] cpumask: Migrate 'alloc_cpumask_var()' users to 'zalloc_cpumask_var()'
From: Ingo Molnar
Date: Mon Dec 07 2015 - 03:51:02 EST
Xunlei Pang reported a scheduler bug in init_rootdomain(), caused by
improper use of alloc_cpumask_var(): the cpumask it allocates is left
uninitialized.

No-one noticed this scheduler bug for a long time, probably because
alloc_cpumask_var() does result in initialized cpumasks in the
!CPUMASK_OFFSTACK case - which covers the vast majority of systems
out there.

So migrate all alloc_cpumask_var() users over to zalloc_cpumask_var(),
to be on the safe side.
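
For reference, the difference boils down to kmalloc() vs. kzalloc()
when CONFIG_CPUMASK_OFFSTACK=y. A minimal sketch of the two patterns
is below - 'struct foo' and its init helpers are illustrative
stand-ins, not the actual init_rootdomain() code:

  #include <linux/cpumask.h>
  #include <linux/gfp.h>

  struct foo {
          cpumask_var_t mask;
  };

  /*
   * Buggy pattern: with CONFIG_CPUMASK_OFFSTACK=y the mask is
   * kmalloc()ed, so its bits start out as heap garbage:
   */
  static int foo_init_unsafe(struct foo *f)
  {
          if (!alloc_cpumask_var(&f->mask, GFP_KERNEL))
                  return -ENOMEM;
          /* f->mask may contain random set bits here */
          return 0;
  }

  /*
   * Safe pattern: the mask is kzalloc()ed (or cpumask_clear()ed in
   * the !CPUMASK_OFFSTACK case), so it is empty either way:
   */
  static int foo_init_safe(struct foo *f)
  {
          if (!zalloc_cpumask_var(&f->mask, GFP_KERNEL))
                  return -ENOMEM;
          /* cpumask_empty(f->mask) is guaranteed here */
          return 0;
  }
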
Reported-by: Xunlei Pang <xlpang@xxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/ia64/kernel/smp.c | 2 +-
arch/mips/kernel/mips-mt-fpaff.c | 6 +++---
arch/powerpc/kernel/irq.c | 2 +-
arch/powerpc/kernel/rtas.c | 4 ++--
arch/powerpc/kernel/smp.c | 2 +-
arch/powerpc/platforms/powernv/subcore.c | 2 +-
arch/powerpc/platforms/pseries/suspend.c | 2 +-
arch/x86/kernel/apic/vector.c | 4 ++--
arch/x86/kernel/cpu/mcheck/mce-inject.c | 2 +-
arch/x86/kernel/smp.c | 2 +-
arch/x86/mm/mmio-mod.c | 2 +-
arch/x86/platform/uv/uv_nmi.c | 2 +-
arch/x86/xen/mmu.c | 2 +-
arch/x86/xen/smp.c | 2 +-
block/blk-mq-cpumap.c | 2 +-
crypto/pcrypt.c | 4 ++--
drivers/acpi/acpi_pad.c | 2 +-
drivers/acpi/processor_throttling.c | 2 +-
drivers/base/cpu.c | 2 +-
drivers/cpufreq/cpufreq.c | 2 +-
drivers/crypto/n2_core.c | 2 +-
drivers/firmware/dcdbas.c | 2 +-
drivers/hwmon/dell-smm-hwmon.c | 2 +-
drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +-
drivers/pci/host/pci-xgene-msi.c | 2 +-
drivers/scsi/mpt3sas/mpt3sas_base.c | 2 +-
drivers/virtio/virtio_pci_common.c | 2 +-
include/linux/cpumask.h | 6 +++---
kernel/compat.c | 4 ++--
kernel/cpu.c | 2 +-
kernel/cpuset.c | 16 ++++++++--------
kernel/irq/irqdesc.c | 2 +-
kernel/irq/manage.c | 6 +++---
kernel/irq/proc.c | 4 ++--
kernel/padata.c | 10 +++++-----
kernel/profile.c | 4 ++--
kernel/sched/core.c | 14 +++++++-------
kernel/smpboot.c | 4 ++--
kernel/taskstats.c | 4 ++--
kernel/time/tick-sched.c | 4 ++--
kernel/torture.c | 2 +-
kernel/trace/ring_buffer.c | 2 +-
kernel/trace/trace.c | 10 +++++-----
kernel/workqueue.c | 4 ++--
lib/cpumask.c | 2 +-
mm/vmstat.c | 2 +-
net/core/flow.c | 2 +-
net/core/net-sysfs.c | 4 ++--
net/core/sysctl_net_core.c | 2 +-
49 files changed, 87 insertions(+), 87 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 7f706d4f84f7..d52b4d8de013 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -301,7 +301,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
preempt_enable();
return;
}
- if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
+ if (!zalloc_cpumask_var(&cpus, GFP_ATOMIC)) {
smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
mm, 1);
} else {
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 789d7bf4fef3..9b8241da4c4c 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -87,15 +87,15 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
get_task_struct(p);
rcu_read_unlock();
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
}
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
- if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_new_mask;
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 290559df1e8b..48558d1ae075 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -429,7 +429,7 @@ void migrate_irqs(void)
cpumask_var_t mask;
const struct cpumask *map = cpu_online_mask;
- alloc_cpumask_var(&mask, GFP_KERNEL);
+ zalloc_cpumask_var(&mask, GFP_KERNEL);
for_each_irq_desc(irq, desc) {
struct irq_data *data;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 5a753fae8265..0631d6a1ea6f 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -897,7 +897,7 @@ int rtas_online_cpus_mask(cpumask_var_t cpus)
if (ret) {
cpumask_var_t tmp_mask;
- if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
+ if (!zalloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
return ret;
/* Use tmp_mask to preserve cpus mask from first failure */
@@ -945,7 +945,7 @@ int rtas_ibm_suspend_me(u64 handle)
return -EIO;
}
- if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ if (!zalloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
return -ENOMEM;
atomic_set(&data.working, 0);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec2058d2d..62896a4eb825 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -767,7 +767,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
* init thread may have been "borrowed" by another CPU in the meantime
* se we pin us down to CPU 0 for a short while
*/
- alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+ zalloc_cpumask_var(&old_mask, GFP_NOWAIT);
cpumask_copy(old_mask, tsk_cpus_allowed(current));
set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 503a73f59359..49ab03d342f5 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -417,7 +417,7 @@ static int subcore_init(void)
if (setup_max_cpus % threads_per_core)
return 0;
- BUG_ON(!alloc_cpumask_var(&cpu_offline_mask, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&cpu_offline_mask, GFP_KERNEL));
set_subcores_per_core(1);
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index e76aefae2aa2..1ae11002472f 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -151,7 +151,7 @@ static ssize_t store_hibernate(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
+ if (!zalloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
return -ENOMEM;
stream_id = simple_strtoul(buf, NULL, 16);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 861bc59c8f25..c7b5904276d5 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -403,7 +403,7 @@ int __init arch_early_irq_init(void)
arch_init_msi_domain(x86_vector_domain);
arch_init_htirq_domain(x86_vector_domain);
- BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
return arch_early_ioapic_init();
}
@@ -513,7 +513,7 @@ static void __send_cleanup_vector(struct apic_chip_data *data)
{
cpumask_var_t cleanup_mask;
- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+ if (unlikely(!zalloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
unsigned int i;
for_each_cpu_and(i, data->old_domain, cpu_online_mask)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 4cfba4371a71..f81bcbc7044e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -239,7 +239,7 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
static int inject_init(void)
{
- if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
return -ENOMEM;
printk(KERN_INFO "Machine check injector initialized\n");
register_mce_write_callback(mce_write);
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 658777cf3851..3f012b907209 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -137,7 +137,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
{
cpumask_var_t allbutself;
- if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+ if (!zalloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
return;
}
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 0057a7accfb1..9af43c2113bc 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -386,7 +386,7 @@ static void enter_uniprocessor(void)
int err;
if (downed_cpus == NULL &&
- !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
+ !zalloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
pr_notice("Failed to allocate mask\n");
goto out;
}
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 327f21c3bde1..327120da28e8 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -718,6 +718,6 @@ void uv_nmi_setup(void)
}
uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
}
- BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
uv_register_nmi_notifier();
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 5e509d086d57..07e5e5133e5c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1063,7 +1063,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
}
/* Get the "official" set of cpus referring to our pagetable. */
- if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
+ if (!zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3f4ebf0261f2..e5196a77e1c2 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -362,7 +362,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
if (xen_smp_intr_init(0))
BUG();
- if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
panic("could not allocate xen_cpu_initialized_map\n");
cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 8764c241e5bb..bd84397e025c 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -37,7 +37,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
cpumask_var_t cpus;
- if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
+ if (!zalloc_cpumask_var(&cpus, GFP_ATOMIC))
return 1;
cpumask_clear(cpus);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index ee9cfb99fe25..e2c433a740ce 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -371,7 +371,7 @@ static int pcrypt_cpumask_change_notify(struct notifier_block *self,
new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
if (!new_mask)
return -ENOMEM;
- if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
kfree(new_mask);
return -ENOMEM;
}
@@ -419,7 +419,7 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
mask = kmalloc(sizeof(*mask), GFP_KERNEL);
if (!mask)
goto err_free_padata;
- if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
kfree(mask);
goto err_free_padata;
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211b2d58..45f93d95d3c5 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -95,7 +95,7 @@ static void round_robin_cpu(unsigned int tsk_index)
unsigned long min_weight = -1;
unsigned long uninitialized_var(preferred_cpu);
- if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
return;
mutex_lock(&round_robin_lock);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d746336d..a5d8e0a8bcf2 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -903,7 +903,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&saved_mask, GFP_KERNEL))
return -ENOMEM;
/*
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 91bbb1959d8d..6e7141400ddc 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -243,7 +243,7 @@ static ssize_t print_cpus_offline(struct device *dev,
cpumask_var_t offline;
/* display offline cpus < nr_cpu_ids */
- if (!alloc_cpumask_var(&offline, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&offline, GFP_KERNEL))
return -ENOMEM;
cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8412ce5f93a7..2524b702897f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1034,7 +1034,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
if (!policy)
return NULL;
- if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&policy->cpus, GFP_KERNEL))
goto err_free_policy;
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 5450880abb7b..0c8a5160a334 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1660,7 +1660,7 @@ static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
if (cpumask_empty(&p->sharing))
return -EINVAL;
- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&old_allowed, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(old_allowed, &current->cpus_allowed);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 829eec8959f2..968d09476cb8 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -255,7 +255,7 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
}
/* SMI requires CPU 0 */
- if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&old_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(old_mask, &current->cpus_allowed);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c8487894b312..90903cea6d9c 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -137,7 +137,7 @@ static int i8k_smm(struct smm_regs *regs)
cpumask_var_t old_mask;
/* SMM requires CPU 0 */
- if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&old_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(old_mask, &current->cpus_allowed);
rc = set_cpus_allowed_ptr(current, cpumask_of(0));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b825f978d441..4e4126c95b27 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2705,7 +2705,7 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
netif_set_xps_queue(ring->netdev,
&ring->q_vector->affinity_mask,
ring->queue_index);
- } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ } else if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
/* Disable XPS to allow selection based on TC */
bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index a6456b578269..0f21851cc681 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -405,7 +405,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
* to each core.
*/
- if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
cpumask_clear(mask);
cpumask_set_cpu(cpu, mask);
err = irq_set_affinity(msi_group->gic_irq, mask);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 11393ebf1a68..fd2cd64f9430 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1829,7 +1829,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
reply_q->msix_index = index;
reply_q->vector = vector;
- if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL))
return -ENOMEM;
cpumask_clear(reply_q->affinity_hint);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 78f804af6c20..d123ed3c1660 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -161,7 +161,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
if (!vp_dev->msix_affinity_masks)
goto error;
for (i = 0; i < nvectors; ++i)
- if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+ if (!zalloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
GFP_KERNEL))
goto error;
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 59915ea5373c..608beca04fc6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -620,7 +620,7 @@ static inline size_t cpumask_size(void)
*
* ie.
* cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
* return -ENOMEM;
*
* ... use 'tmpmask' like a normal struct cpumask * ...
@@ -628,12 +628,12 @@ static inline size_t cpumask_size(void)
* free_cpumask_var(tmpmask);
*
*
- * However, one notable exception is there. alloc_cpumask_var() allocates
+ * However, one notable exception is there. zalloc_cpumask_var() allocates
* only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
* NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
*
* cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
* return -ENOMEM;
*
* var = *tmpmask;
diff --git a/kernel/compat.c b/kernel/compat.c
index 333d364be29d..5b1d2236bbec 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -620,7 +620,7 @@ COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid,
cpumask_var_t new_mask;
int retval;
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
@@ -644,7 +644,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,
if (len & (sizeof(compat_ulong_t)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 85ff5e26e23b..ecde871f7c12 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -654,7 +654,7 @@ void enable_nonboot_cpus(void)
static int __init alloc_frozen_cpus(void)
{
- if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+ if (!zalloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
return -ENOMEM;
return 0;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 10ae73611d80..7a53d7c40e95 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -403,9 +403,9 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
if (!trial)
return NULL;
- if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
goto free_cs;
- if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
goto free_cpus;
cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
@@ -633,7 +633,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
dattr = NULL;
csa = NULL;
- if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
goto done;
cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
@@ -1909,9 +1909,9 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
- if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
goto free_cs;
- if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
goto free_cpus;
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
@@ -2065,9 +2065,9 @@ int __init cpuset_init(void)
{
int err = 0;
- if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
BUG();
- if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
BUG();
cpumask_setall(top_cpuset.cpus_allowed);
@@ -2083,7 +2083,7 @@ int __init cpuset_init(void)
if (err < 0)
return err;
- if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&cpus_attach, GFP_KERNEL))
BUG();
return 0;
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 239e2ae2c947..a09b4d5bf747 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -26,7 +26,7 @@ static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
cpumask_setall(irq_default_affinity);
}
#else
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0eebaeef317b..93ec1f6aee23 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -266,7 +266,7 @@ static void irq_affinity_notify(struct work_struct *work)
cpumask_var_t cpumask;
unsigned long flags;
- if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ if (!desc || !zalloc_cpumask_var(&cpumask, GFP_KERNEL))
goto out;
raw_spin_lock_irqsave(&desc->lock, flags);
@@ -826,7 +826,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
* In case we are out of memory we set IRQTF_AFFINITY again and
* try again next time
*/
- if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
set_bit(IRQTF_AFFINITY, &action->thread_flags);
return;
}
@@ -1158,7 +1158,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
}
- if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_thread;
}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a2c02fd5d6d0..6cac4389af22 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -99,7 +99,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
if (!irq_can_set_affinity(irq) || no_irq_affinity)
return -EIO;
- if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
if (type)
@@ -195,7 +195,7 @@ static ssize_t default_affinity_write(struct file *file,
cpumask_var_t new_value;
int err;
- if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(buffer, count, new_value);
diff --git a/kernel/padata.c b/kernel/padata.c
index b38bea9c466a..c0e438e40f8a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -351,11 +351,11 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
const struct cpumask *pcpumask,
const struct cpumask *cbcpumask)
{
- if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
return -ENOMEM;
cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
- if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
free_cpumask_var(pd->cpumask.cbcpu);
return -ENOMEM;
}
@@ -931,7 +931,7 @@ static ssize_t store_cpumask(struct padata_instance *pinst,
ssize_t ret;
int mask_type;
- if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&new_cpumask, GFP_KERNEL))
return -ENOMEM;
ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
@@ -1045,9 +1045,9 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
goto err;
get_online_cpus();
- if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
goto err_free_inst;
- if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
free_cpumask_var(pinst->cpumask.pcpu);
goto err_free_inst;
}
diff --git a/kernel/profile.c b/kernel/profile.c
index 99513e1160e5..ade1179de386 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -106,7 +106,7 @@ int __ref profile_init(void)
prof_len = (_etext - _stext) >> prof_shift;
buffer_bytes = prof_len*sizeof(atomic_t);
- if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(prof_cpu_mask, cpu_possible_mask);
@@ -437,7 +437,7 @@ static ssize_t prof_cpu_mask_proc_write(struct file *file,
cpumask_var_t new_value;
int err;
- if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(buffer, count, new_value);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f33c94d974c9..aa7f8e965386 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4518,11 +4518,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
retval = -EINVAL;
goto out_put_task;
}
- if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_put_task;
}
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL)) {
retval = -ENOMEM;
goto out_free_cpus_allowed;
}
@@ -4610,7 +4610,7 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
cpumask_var_t new_mask;
int retval;
- if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL))
return -ENOMEM;
retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
@@ -4666,7 +4666,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
if (len & (sizeof(unsigned long)-1))
return -EINVAL;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
ret = sched_getaffinity(pid, mask);
@@ -7163,7 +7163,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
if (!doms)
return NULL;
for (i = 0; i < ndoms; i++) {
- if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&doms[i], GFP_KERNEL)) {
free_sched_domains(doms, i);
return NULL;
}
@@ -7402,8 +7402,8 @@ void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
- alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
- alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+ zalloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+ zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
sched_init_numa();
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d264f59bff56..54c80114727e 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -285,7 +285,7 @@ int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_threa
unsigned int cpu;
int ret = 0;
- if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
return -ENOMEM;
cpumask_copy(plug_thread->cpumask, cpumask);
@@ -343,7 +343,7 @@ int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
cpumask_var_t tmp;
unsigned int cpu;
- if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
return -ENOMEM;
get_online_cpus();
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 21f82c29c914..79bb1d4f259e 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -468,7 +468,7 @@ static int cmd_attr_register_cpumask(struct genl_info *info)
cpumask_var_t mask;
int rc;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
if (rc < 0)
@@ -484,7 +484,7 @@ static int cmd_attr_deregister_cpumask(struct genl_info *info)
cpumask_var_t mask;
int rc;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
if (rc < 0)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99ef0df12807..b6cf842440c8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -314,7 +314,7 @@ static int tick_nohz_init_all(void)
int err = -1;
#ifdef CONFIG_NO_HZ_FULL_ALL
- if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
WARN(1, "NO_HZ: Can't allocate full dynticks cpumask\n");
return err;
}
@@ -334,7 +334,7 @@ void __init tick_nohz_init(void)
return;
}
- if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
WARN(1, "NO_HZ: Can't allocate not-full dynticks cpumask\n");
cpumask_clear(tick_nohz_full_mask);
tick_nohz_full_running = false;
diff --git a/kernel/torture.c b/kernel/torture.c
index 44aa462d033f..82c02ac0f3f2 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -369,7 +369,7 @@ int torture_shuffle_init(long shuffint)
shuffle_idle_cpu = -1;
- if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
return -ENOMEM;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9c6045a27ba3..15dbf881ee17 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1311,7 +1311,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!buffer)
return NULL;
- if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 87fb9801bd9e..9295b7ef5338 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3421,7 +3421,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
cpumask_var_t tracing_cpumask_new;
int err, cpu;
- if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -4591,7 +4591,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
trace_seq_init(&iter->seq);
iter->trace = tr->current_trace;
- if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) {
ret = -ENOMEM;
goto fail;
}
@@ -6641,7 +6641,7 @@ static int instance_mkdir(const char *name)
if (!tr->name)
goto out_free_tr;
- if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
tr->trace_flags = global_trace.trace_flags;
@@ -7205,10 +7205,10 @@ __init static int tracer_alloc_buffers(void)
*/
BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
- if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
- if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c579dbab2e36..8bf8c72dd01a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3007,7 +3007,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
attrs = kzalloc(sizeof(*attrs), gfp_mask);
if (!attrs)
goto fail;
- if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+ if (!zalloc_cpumask_var(&attrs->cpumask, gfp_mask))
goto fail;
cpumask_copy(attrs->cpumask, cpu_possible_mask);
@@ -5216,7 +5216,7 @@ static int __init init_workqueues(void)
WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
- BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
+ BUG_ON(!zalloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 5a70f6196f57..7f93df07bce7 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -64,7 +64,7 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
if (!*mask) {
- printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
+ printk(KERN_ERR "=> zalloc_cpumask_var: failed!\n");
dump_stack();
}
#endif
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879a2be23325..d57465284ea6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1478,7 +1478,7 @@ static void __init start_shepherd_timer(void)
INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
vmstat_update);
- if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
BUG();
cpumask_copy(cpu_stat_off, cpu_online_mask);
diff --git a/net/core/flow.c b/net/core/flow.c
index 1033725be40b..af62e71507f6 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -344,7 +344,7 @@ void flow_cache_flush(struct net *net)
int i, self;
/* Track which cpus need flushing to avoid disturbing all cores. */
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return;
cpumask_clear(mask);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f88a62ab019d..0bc34c7bd91b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -693,7 +693,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
@@ -1217,7 +1217,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
index = get_netdev_queue_index(queue);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 95b6139d710c..74e858d3150e 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -106,7 +106,7 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
cpumask_var_t mask;
int i, len, ret = 0;
- if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
if (write) {
--
2.5.0