x86: move default_ipi_xx back to ipi.c
author      Yinghai Lu <yinghai@kernel.org>
            Sat, 31 Jan 2009 01:29:27 +0000 (17:29 -0800)
committer   Ingo Molnar <mingo@elte.hu>
            Thu, 5 Feb 2009 21:27:56 +0000 (22:27 +0100)
Impact: cleanup

Only leave _default_ipi_xx etc. in the .h file.

Beyond the cleanup factor, this saves a bit of code size as well:

    text    data     bss      dec     hex filename
 7281931 1630144 1463304 10375379  9e50d3 vmlinux.before
 7281753 1630144 1463304 10375201  9e5021 vmlinux.after

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
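
The saving comes from de-duplication: a static inline defined in a header gets a separate copy of its body in every translation unit that uses it, while an out-of-line definition in ipi.c is emitted exactly once and the header keeps only an extern declaration. A minimal sketch of the before/after pattern, using a hypothetical foo_send_IPI() rather than any identifier from this patch:

    /* before: foo.h -- the body is compiled into every .c file that calls it */
    static inline void foo_send_IPI(int vector)
    {
            (void)vector;                   /* placeholder body */
    }

    /* after: foo.h -- only the declaration stays in the header */
    extern void foo_send_IPI(int vector);

    /* after: foo.c -- the single out-of-line definition in the image */
    void foo_send_IPI(int vector)
    {
            (void)vector;                   /* placeholder body */
    }

Each caller then emits a call instruction instead of an inlined body, which is where the 178 bytes of text saved in the table above come from.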
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/ipi.h
arch/x86/kernel/ipi.c

diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 3ef2bded97acc968f7c3549639f926e17d07475b..1a20e3d1200609bb41f4f67e952907d0f0f4b2c8 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -71,12 +71,6 @@ extern void setup_ioapic_dest(void);
 extern void enable_IO_APIC(void);
 #endif
 
-/* IPI functions */
-#ifdef CONFIG_X86_32
-extern void default_send_IPI_self(int vector);
-#endif
-extern void default_send_IPI(int dest, int vector);
-
 /* Statistics */
 extern atomic_t irq_err_count;
 extern atomic_t irq_mis_count;
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index aa79945445b5e86200644de9b4bea6aa6a4aa765..5f2efc5d99270dd72e60cef3aabd1eeab4075e54 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -119,112 +119,22 @@ static inline void
        native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void
-default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
-{
-       unsigned long query_cpu;
-       unsigned long flags;
-
-       /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send
-        * to an arbitrary mask, so I do a unicast to each CPU instead.
-        * - mbligh
-        */
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
-                               query_cpu), vector, APIC_DEST_PHYSICAL);
-       }
-       local_irq_restore(flags);
-}
-
-static inline void
-default_send_IPI_mask_allbutself_phys(const struct cpumask *mask, int vector)
-{
-       unsigned int this_cpu = smp_processor_id();
-       unsigned int query_cpu;
-       unsigned long flags;
-
-       /* See Hack comment above */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               if (query_cpu == this_cpu)
-                       continue;
-               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
-                                query_cpu), vector, APIC_DEST_PHYSICAL);
-       }
-       local_irq_restore(flags);
-}
-
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+                                                int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+                                                        int vector);
 #include <asm/genapic.h>
 
-static inline void
-default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
-{
-       unsigned long flags;
-       unsigned int query_cpu;
-
-       /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send
-        * to an arbitrary mask, so I do a unicasts to each CPU instead. This
-        * should be modified to do 1 message per cluster ID - mbligh
-        */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               __default_send_IPI_dest_field(
-                       apic->cpu_to_logical_apicid(query_cpu), vector,
-                       apic->dest_logical);
-       local_irq_restore(flags);
-}
-
-static inline void
-default_send_IPI_mask_allbutself_logical(const struct cpumask *mask, int vector)
-{
-       unsigned long flags;
-       unsigned int query_cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       /* See Hack comment above */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               if (query_cpu == this_cpu)
-                       continue;
-               __default_send_IPI_dest_field(
-                       apic->cpu_to_logical_apicid(query_cpu), vector,
-                       apic->dest_logical);
-               }
-       local_irq_restore(flags);
-}
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                        int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                        int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
 extern int no_broadcast;
 
-#ifndef CONFIG_X86_64
-/*
- * This is only used on smaller machines.
- */
-static inline void default_send_IPI_mask_bitmask_logical(const struct cpumask *cpumask, int vector)
-{
-       unsigned long mask = cpumask_bits(cpumask)[0];
-       unsigned long flags;
-
-       local_irq_save(flags);
-       WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-       __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-       local_irq_restore(flags);
-}
-
-static inline void default_send_IPI_mask_logical(const struct cpumask *mask, int vector)
-{
-       default_send_IPI_mask_bitmask_logical(mask, vector);
-}
-#endif
-
 static inline void __default_local_send_IPI_allbutself(int vector)
 {
        if (no_broadcast || vector == NMI_VECTOR)
@@ -242,22 +152,11 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
-static inline void default_send_IPI_allbutself(int vector)
-{
-       /*
-        * if there are no other CPUs in the system then we get an APIC send 
-        * error if we try to broadcast, thus avoid sending IPIs in this case.
-        */
-       if (!(num_online_cpus() > 1))
-               return;
-
-       __default_local_send_IPI_allbutself(vector);
-}
-
-static inline void default_send_IPI_all(int vector)
-{
-       __default_local_send_IPI_all(vector);
-}
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+                                                int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
 #endif
 
 #endif
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
index 339f4f3feee585b446e534534ac3f7498e71b22c..dbf5445727a9d69d5cd02e07ff7c675c916bb0d9 100644
--- a/arch/x86/kernel/ipi.c
+++ b/arch/x86/kernel/ipi.c
 #include <asm/proto.h>
 #include <asm/ipi.h>
 
+void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
+{
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       /*
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicast to each CPU instead.
+        * - mbligh
+        */
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+                               query_cpu), vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int query_cpu;
+       unsigned long flags;
+
+       /* See Hack comment above */
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+                                query_cpu), vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned long flags;
+       unsigned int query_cpu;
+
+       /*
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicasts to each CPU instead. This
+        * should be modified to do 1 message per cluster ID - mbligh
+        */
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask)
+               __default_send_IPI_dest_field(
+                       apic->cpu_to_logical_apicid(query_cpu), vector,
+                       apic->dest_logical);
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned long flags;
+       unsigned int query_cpu;
+       unsigned int this_cpu = smp_processor_id();
+
+       /* See Hack comment above */
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __default_send_IPI_dest_field(
+                       apic->cpu_to_logical_apicid(query_cpu), vector,
+                       apic->dest_logical);
+               }
+       local_irq_restore(flags);
+}
+
 #ifdef CONFIG_X86_32
 
+/*
+ * This is only used on smaller machines.
+ */
+void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
+{
+       unsigned long mask = cpumask_bits(cpumask)[0];
+       unsigned long flags;
+
+       local_irq_save(flags);
+       WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
+       __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_allbutself(int vector)
+{
+       /*
+        * if there are no other CPUs in the system then we get an APIC send
+        * error if we try to broadcast, thus avoid sending IPIs in this case.
+        */
+       if (!(num_online_cpus() > 1))
+               return;
+
+       __default_local_send_IPI_allbutself(vector);
+}
+
+void default_send_IPI_all(int vector)
+{
+       __default_local_send_IPI_all(vector);
+}
+
 void default_send_IPI_self(int vector)
 {
        __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);