cpumask: x86: convert cpu_sibling_map/cpu_core_map to cpumask_var_t
author: Rusty Russell <rusty@rustcorp.com.au>
Fri, 13 Mar 2009 04:19:50 +0000 (14:49 +1030)
committer: Rusty Russell <rusty@rustcorp.com.au>
Fri, 13 Mar 2009 04:19:50 +0000 (14:49 +1030)
Impact: reduce per-cpu size for CONFIG_CPUMASK_OFFSTACK=y

In most places it's cleaner to use the accessors cpu_sibling_mask()
and cpu_core_mask() wrappers which already exist.

I couldn't avoid cleaning up the access in oprofile, either.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
arch/x86/include/asm/smp.h
arch/x86/include/asm/topology.h
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/smpboot.c
arch/x86/oprofile/op_model_p4.c

index 47d0e21f2b9ec85b3bb0658c88eb2c611be8091d..cfb10f1667fe13f1af4ec6e7baa6a53b9fcba2ad 100644 (file)
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
 DECLARE_PER_CPU(int, cpu_number);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
-       return &per_cpu(cpu_sibling_map, cpu);
+       return per_cpu(cpu_sibling_map, cpu);
 }
 
 static inline struct cpumask *cpu_core_mask(int cpu)
 {
-       return &per_cpu(cpu_core_map, cpu);
+       return per_cpu(cpu_core_map, cpu);
 }
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
index f7c20d0314220cb92c7e50e8e48292a0f13641ce..fa4aa42e976d45490a70867bc434c69d4d9cd961 100644 (file)
@@ -249,8 +249,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
-#define topology_core_cpumask(cpu)             (&per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)           (per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers                yes
@@ -264,7 +264,7 @@ struct pci_bus;
 void set_pci_bus_resources_arch_default(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
-#define mc_capable()   (cpus_weight(per_cpu(cpu_core_map, 0)) != nr_cpu_ids)
+#define mc_capable()   (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
 #define smt_capable()                  (smp_num_siblings > 1)
 #endif
 
index 3178c3acd97ebb4aa515d19da8a7f75b1178b08a..d8341d17c1890e239c470f211efc9806d358d7b1 100644 (file)
@@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
        unsigned int i;
 
 #ifdef CONFIG_SMP
-       cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+       cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
        /* Errata workaround */
index 6428aa17b40e794683b5359ed0589dc3dbfbf592..e8fd76f9888321e2843ba80214810f24f1fc02cc 100644 (file)
@@ -56,7 +56,10 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+static inline const struct cpumask *cpu_core_mask(int cpu)
+{
+       return cpumask_of(0);
+}
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -654,7 +657,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
        dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
        data->powernow_table = powernow_table;
-       if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+       if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
                print_basics(data);
 
        for (j = 0; j < data->numps; j++)
@@ -808,7 +811,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
        /* fill in data */
        data->numps = data->acpi_data.state_count;
-       if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
+       if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
                print_basics(data);
        powernow_k8_acpi_pst_values(data, 0);
 
@@ -1224,7 +1227,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (cpu_family == CPU_HW_PSTATE)
                cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
        else
-               cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+               cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
        data->available_cores = pol->cpus;
 
        if (cpu_family == CPU_HW_PSTATE)
@@ -1286,7 +1289,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
        unsigned int khz = 0;
        unsigned int first;
 
-       first = first_cpu(per_cpu(cpu_core_map, cpu));
+       first = cpumask_first(cpu_core_mask(cpu));
        data = per_cpu(powernow_data, first);
 
        if (!data)
index dedc1e98f1683c4e35e0b194b81aed354ab3e55a..1f0ec83d343b76e62db2de80bcb7b306a786b038 100644 (file)
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
        /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-       cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+       cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
        cpus_allowed = current->cpus_allowed;
index c5a32f92d07ecc41b55b290f16fb7a0889c07c41..1f429ee3477d7c9c10617c8c33c03e6e1604c185 100644 (file)
@@ -477,7 +477,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = cpumask_first(&per_cpu(cpu_core_map, cpu));
+               i = cpumask_first(cpu_core_mask(cpu));
 
                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
@@ -497,7 +497,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (err)
                        goto out;
 
-               cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
+               cpumask_copy(b->cpus, cpu_core_mask(cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
@@ -521,7 +521,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
        cpumask_setall(b->cpus);
 #else
-       cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
+       cpumask_copy(b->cpus, cpu_core_mask(cpu));
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
index d67e0e48bc2dfa1115bfc427323bd78e878d51fc..4dd610e226e00cdf9fa32509032701b8078ecb76 100644 (file)
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
        if (c->x86_max_cores * smp_num_siblings > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
-                          cpus_weight(per_cpu(cpu_core_map, cpu)));
+                          cpumask_weight(cpu_sibling_mask(cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
                seq_printf(m, "apicid\t\t: %d\n", c->apicid);
index f534257d4b469b5c4d11fed12fed8e665c9237d6..7f051c170addaffd9bf0d87fd1d7555df6d36087 100644 (file)
@@ -101,11 +101,11 @@ EXPORT_SYMBOL(smp_num_siblings);
 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
 
 /* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* Per CPU bogomips and other parameters */
@@ -1026,6 +1026,8 @@ static void __init smp_cpu_index_default(void)
  */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
+       unsigned int i;
+
        preempt_disable();
        smp_cpu_index_default();
        current_cpu_data = boot_cpu_data;
@@ -1039,6 +1041,12 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        boot_cpu_logical_apicid = logical_smp_processor_id();
 #endif
        current_thread_info()->cpu = 0;  /* needed? */
+       for_each_possible_cpu(i) {
+               alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
+               alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+               cpumask_clear(per_cpu(cpu_core_map, i));
+               cpumask_clear(per_cpu(cpu_sibling_map, i));
+       }
        set_cpu_sibling_map(0);
 
        enable_IR_x2apic();
index 4c4a51c90bc26f31cfc192adf2fb341ba7fcc754..819b131fd752888c39b39f2fb386263223d9dd83 100644 (file)
@@ -380,7 +380,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
        int cpu = smp_processor_id();
-       return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
+       return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
 #endif
        return 0;
 }