cpumask: alloc zeroed cpumask for static cpumask_var_ts
author  Yinghai Lu <yinghai@kernel.org>
Sat, 6 Jun 2009 21:51:36 +0000 (14:51 -0700)
committer  Rusty Russell <rusty@rustcorp.com.au>
Tue, 9 Jun 2009 13:00:27 +0000 (22:30 +0930)
These masks are declared as static cpumask_var_t, so when MAXSMP is not
set they are embedded and already zeroed by static initialization.  With
MAXSMP (CONFIG_CPUMASK_OFFSTACK), cpumask_var_t is a separate allocation
that arrives uninitialized, so allocate it zeroed to avoid surprises.

Signed-off-by: Yinghai Lu <yinghai.lu@kernel.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
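
For context, a minimal sketch of why the z-variants matter, simplified
from the include/linux/cpumask.h and lib/cpumask.c pattern rather than
copied verbatim.  With CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a
pointer and alloc_cpumask_var() hands back uninitialized heap memory;
without it, the mask is a one-element array, so a static instance is
zeroed by static-storage initialization:

/* Hedged sketch, not the exact kernel source. */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* MAXSMP: heap-allocated */

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	/* __GFP_ZERO makes the allocator return zeroed memory. */
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
#else
typedef struct cpumask cpumask_var_t[1];	/* embedded in the object */

static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	cpumask_clear(*mask);			/* explicit and nearly free */
	return true;
}
#endif

The _node variants follow the same pattern with a NUMA node hint, so the
conversions below are safe and cheap in both configurations.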
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/tlb_uv.c
drivers/acpi/processor_core.c
drivers/cpufreq/cpufreq.c
kernel/sched_cpupri.c
kernel/sched_rt.c
kernel/smp.c

arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 54b6de2cd9478907d383424e38539935274c6516..752e8c6b2c7e29e903895a4abe541b6e186b7add 100644
@@ -550,7 +550,7 @@ static int __init acpi_cpufreq_early_init(void)
                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
-               if (!alloc_cpumask_var_node(
+               if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {
 
arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index a8363e5be4ef5b6b9ce78497b7d2a4aebf3a7f13..d47c775eb0abce217593a99cd06dba2f87125977 100644
@@ -322,7 +322,7 @@ static int powernow_acpi_init(void)
                goto err0;
        }
 
-       if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+       if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
                                                                GFP_KERNEL)) {
                retval = -ENOMEM;
                goto err05;
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35dc8fbe92bd1679ffe0bd5311e474134b6f2ddb..cf52215d9eb1eaa7c16e0fe24e906da225ffc225 100644
@@ -887,7 +887,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);
 
-       if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
                printk(KERN_ERR PFX
                                "unable to alloc powernow_k8_data cpumask\n");
                ret_val = -ENOMEM;
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index c9f1fdc02830f1e6fd1748648bda58c5dd2cb51d..55c831ed71cec378c48692eb91df26e1f45ccc1b 100644
@@ -471,7 +471,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 
        if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
                return -ENOMEM;
-       if (unlikely(!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
+       if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
                free_cpumask_var(saved_mask);
                return -ENOMEM;
        }
arch/x86/kernel/cpu/mcheck/mce_64.c
index 6fb0b359d2a5ead120c59b114a4a8834f297e0a3..09dd1d414fc36bf40eb8218619298e76bf1ac443 100644
@@ -1163,7 +1163,7 @@ static __init int mce_init_device(void)
        if (!mce_available(&boot_cpu_data))
                return -EIO;
 
-       alloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
+       zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL);
 
        err = mce_init_banks();
        if (err)
arch/x86/kernel/tlb_uv.c
index ed0c33761e6d1d75bf0b435c490384e2cd8b31dd..8c7b03b0cfcb47b1585187e95a67890137218c5d 100644
@@ -832,7 +832,7 @@ static int __init uv_bau_init(void)
                return 0;
 
        for_each_possible_cpu(cur_cpu)
-               alloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
+               zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
                                       GFP_KERNEL, cpu_to_node(cur_cpu));
 
        uv_bau_retry_limit = 1;
drivers/acpi/processor_core.c
index 45ad3288c5fffb35868a801d34a9adc31676a134..23f0fb84f1c1104538efe624c5824937e069c191 100644
@@ -844,7 +844,7 @@ static int acpi_processor_add(struct acpi_device *device)
        if (!pr)
                return -ENOMEM;
 
-       if (!alloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
                kfree(pr);
                return -ENOMEM;
        }
drivers/cpufreq/cpufreq.c
index 47d2ad0ae079c738fa8aa90656595026a56d7a8a..6e2ec0b189489803ebaa55ecb4ae094bd2a1fdf0 100644
@@ -808,7 +808,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                ret = -ENOMEM;
                goto nomem_out;
        }
-       if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
                free_cpumask_var(policy->cpus);
                kfree(policy);
                ret = -ENOMEM;
kernel/sched_cpupri.c
index cdd3c89574cd759ebe3dc6e37499974598d873d5..344712a5e3eddeedd4072ea8754d115dec8809ad 100644
@@ -165,7 +165,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
                vec->count = 0;
                if (bootmem)
                        alloc_bootmem_cpumask_var(&vec->mask);
-               else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+               else if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
                        goto cleanup;
        }
 
kernel/sched_rt.c
index f2c66f8f9712d218e4849a77ad147bbd65124959..9bf0d2a7304569a87ba4aa0c919bbb2531c39c06 100644
@@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void)
        unsigned int i;
 
        for_each_possible_cpu(i)
-               alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+               zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
                                        GFP_KERNEL, cpu_to_node(i));
 }
 #endif /* CONFIG_SMP */
kernel/smp.c
index 858baac568ee03d96210bcdb69e0e6413aad84af..ad63d8501207836353ad91d15f94fb3aaa4c0c1b 100644
@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+               if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return NOTIFY_BAD;
                break;