x86: create ipi.c
author Glauber Costa <gcosta@redhat.com>
Mon, 3 Mar 2008 17:12:53 +0000 (14:12 -0300)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:40:56 +0000 (17:40 +0200)
This patch moves all IPI- and APIC-related functions
from smp_32.c to the new ipi.c.
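
For reference, the ICR value composition performed by the moved
__prepare_ICR()/__prepare_ICR2() helpers can be illustrated by the
standalone sketch below. The constants mirror include/asm-x86/apicdef.h;
the example destination mask, the 0xfb vector and the printf stand-in for
the APIC register writes are only illustrative, so the sketch builds and
runs outside the kernel:

	#include <stdio.h>

	#define NMI_VECTOR		0x02
	#define APIC_DEST_LOGICAL	0x00800	/* logical destination mode */
	#define APIC_DM_FIXED		0x00000
	#define APIC_DM_NMI		0x00400
	#define SET_APIC_DEST_FIELD(x)	((x) << 24)

	/* same logic as __prepare_ICR(): NMIs ignore the vector field */
	static unsigned int prepare_icr(unsigned int shortcut, int vector)
	{
		unsigned int icr = shortcut | APIC_DEST_LOGICAL;

		if (vector == NMI_VECTOR)
			icr |= APIC_DM_NMI;
		else
			icr |= APIC_DM_FIXED | vector;
		return icr;
	}

	int main(void)
	{
		/* the destination sits in ICR bits 56-63, i.e. ICR2[31:24] */
		unsigned int icr2 = SET_APIC_DEST_FIELD(0x3);	/* example mask */
		unsigned int icr  = prepare_icr(0, 0xfb);	/* example vector */

		printf("ICR2 = 0x%08x, ICR = 0x%08x\n", icr2, icr);
		return 0;
	}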

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/Makefile
arch/x86/kernel/ipi.c [new file with mode: 0644]
arch/x86/kernel/smp_32.c

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0a4b088bab5d1e704cf2af1000c256e7ece95b55..e3b01f96c565ccd286b656455e9ea26d6a46b172 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -47,7 +47,7 @@ obj-$(CONFIG_PCI)             += early-quirks.o
 apm-y                          := apm_32.o
 obj-$(CONFIG_APM)              += apm.o
 obj-$(CONFIG_X86_SMP)          += smp_$(BITS).o smpboot_$(BITS).o smp.o
-obj-$(CONFIG_X86_SMP)          += smpboot.o tsc_sync.o
+obj-$(CONFIG_X86_SMP)          += smpboot.o tsc_sync.o ipi.o
 obj-$(CONFIG_X86_32_SMP)       += smpcommon.o
 obj-$(CONFIG_X86_64_SMP)       += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)   += trampoline_$(BITS).o
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
new file mode 100644
index 0000000..c0df7b8
--- /dev/null
+++ b/arch/x86/kernel/ipi.c
@@ -0,0 +1,178 @@
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/apic.h>
+#include <asm/proto.h>
+
+#ifdef CONFIG_X86_32
+#include <mach_apic.h>
+/*
+ * the following functions deal with sending IPIs between CPUs.
+ *
+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
+ */
+
+static inline int __prepare_ICR(unsigned int shortcut, int vector)
+{
+       unsigned int icr = shortcut | APIC_DEST_LOGICAL;
+
+       switch (vector) {
+       default:
+               icr |= APIC_DM_FIXED | vector;
+               break;
+       case NMI_VECTOR:
+               icr |= APIC_DM_NMI;
+               break;
+       }
+       return icr;
+}
+
+static inline int __prepare_ICR2(unsigned int mask)
+{
+       return SET_APIC_DEST_FIELD(mask);
+}
+
+void __send_IPI_shortcut(unsigned int shortcut, int vector)
+{
+       /*
+        * Subtle. In the case of the 'never do double writes' workaround
+        * we have to lock out interrupts to be safe.  As we don't care
+        * of the value read we use an atomic rmw access to avoid costly
+        * cli/sti.  Otherwise we use an even cheaper single atomic write
+        * to the APIC.
+        */
+       unsigned int cfg;
+
+       /*
+        * Wait for idle.
+        */
+       apic_wait_icr_idle();
+
+       /*
+        * No need to touch the target chip field
+        */
+       cfg = __prepare_ICR(shortcut, vector);
+
+       /*
+        * Send the IPI. The write to APIC_ICR fires this off.
+        */
+       apic_write_around(APIC_ICR, cfg);
+}
+
+void send_IPI_self(int vector)
+{
+       __send_IPI_shortcut(APIC_DEST_SELF, vector);
+}
+
+/*
+ * This is used to send an IPI with no shorthand notation (the destination is
+ * specified in bits 56 to 63 of the ICR).
+ */
+static inline void __send_IPI_dest_field(unsigned long mask, int vector)
+{
+       unsigned long cfg;
+
+       /*
+        * Wait for idle.
+        */
+       if (unlikely(vector == NMI_VECTOR))
+               safe_apic_wait_icr_idle();
+       else
+               apic_wait_icr_idle();
+
+       /*
+        * prepare target chip field
+        */
+       cfg = __prepare_ICR2(mask);
+       apic_write_around(APIC_ICR2, cfg);
+
+       /*
+        * program the ICR
+        */
+       cfg = __prepare_ICR(0, vector);
+
+       /*
+        * Send the IPI. The write to APIC_ICR fires this off.
+        */
+       apic_write_around(APIC_ICR, cfg);
+}
+
+/*
+ * This is only used on smaller machines.
+ */
+void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
+{
+       unsigned long mask = cpus_addr(cpumask)[0];
+       unsigned long flags;
+
+       local_irq_save(flags);
+       WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
+       __send_IPI_dest_field(mask, vector);
+       local_irq_restore(flags);
+}
+
+void send_IPI_mask_sequence(cpumask_t mask, int vector)
+{
+       unsigned long flags;
+       unsigned int query_cpu;
+
+       /*
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicasts to each CPU instead. This
+        * should be modified to do 1 message per cluster ID - mbligh
+        */
+
+       local_irq_save(flags);
+       for_each_possible_cpu(query_cpu) {
+               if (cpu_isset(query_cpu, mask)) {
+                       __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
+                                             vector);
+               }
+       }
+       local_irq_restore(flags);
+}
+
+/* must come after the send_IPI functions above for inlining */
+#include <mach_ipi.h>
+static int convert_apicid_to_cpu(int apic_id)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
+                       return i;
+       }
+       return -1;
+}
+
+int safe_smp_processor_id(void)
+{
+       int apicid, cpuid;
+
+       if (!boot_cpu_has(X86_FEATURE_APIC))
+               return 0;
+
+       apicid = hard_smp_processor_id();
+       if (apicid == BAD_APICID)
+               return 0;
+
+       cpuid = convert_apicid_to_cpu(apicid);
+
+       return cpuid >= 0 ? cpuid : 0;
+}
+#endif
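
A note on the APIC-ID-to-CPU reverse lookup used by safe_smp_processor_id()
above: it is a plain linear scan of the per-CPU x86_cpu_to_apicid map. A
minimal userspace sketch of that lookup, with an ordinary array standing in
for the per-CPU variable and made-up APIC IDs, looks like this:

	#include <stdio.h>

	#define NR_CPUS	4

	/* stand-in for per_cpu(x86_cpu_to_apicid, cpu); values are made up */
	static unsigned char cpu_to_apicid[NR_CPUS] = { 0, 2, 4, 6 };

	static int convert_apicid_to_cpu(int apic_id)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++)
			if (cpu_to_apicid[i] == apic_id)
				return i;
		return -1;	/* no known CPU owns this APIC ID */
	}

	int main(void)
	{
		printf("APIC ID 4 -> CPU %d\n", convert_apicid_to_cpu(4));	/* 2 */
		printf("APIC ID 5 -> CPU %d\n", convert_apicid_to_cpu(5));	/* -1 */
		return 0;
	}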
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 61e546e85733ef2bc4acf7c1ac288684a4ece0aa..d80623aba9c5a24722efedea8009aec30d59fba4 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
 
 DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
 
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR (unsigned int shortcut, int vector)
-{
-       unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-       switch (vector) {
-       default:
-               icr |= APIC_DM_FIXED | vector;
-               break;
-       case NMI_VECTOR:
-               icr |= APIC_DM_NMI;
-               break;
-       }
-       return icr;
-}
-
-static inline int __prepare_ICR2 (unsigned int mask)
-{
-       return SET_APIC_DEST_FIELD(mask);
-}
-
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-       /*
-        * Subtle. In the case of the 'never do double writes' workaround
-        * we have to lock out interrupts to be safe.  As we don't care
-        * of the value read we use an atomic rmw access to avoid costly
-        * cli/sti.  Otherwise we use an even cheaper single atomic write
-        * to the APIC.
-        */
-       unsigned int cfg;
-
-       /*
-        * Wait for idle.
-        */
-       apic_wait_icr_idle();
-
-       /*
-        * No need to touch the target chip field
-        */
-       cfg = __prepare_ICR(shortcut, vector);
-
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write_around(APIC_ICR, cfg);
-}
-
-void send_IPI_self(int vector)
-{
-       __send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
-{
-       unsigned long cfg;
-
-       /*
-        * Wait for idle.
-        */
-       if (unlikely(vector == NMI_VECTOR))
-               safe_apic_wait_icr_idle();
-       else
-               apic_wait_icr_idle();
-               
-       /*
-        * prepare target chip field
-        */
-       cfg = __prepare_ICR2(mask);
-       apic_write_around(APIC_ICR2, cfg);
-               
-       /*
-        * program the ICR 
-        */
-       cfg = __prepare_ICR(0, vector);
-                       
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write_around(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
-{
-       unsigned long mask = cpus_addr(cpumask)[0];
-       unsigned long flags;
-
-       local_irq_save(flags);
-       WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
-       __send_IPI_dest_field(mask, vector);
-       local_irq_restore(flags);
-}
-
-void send_IPI_mask_sequence(cpumask_t mask, int vector)
-{
-       unsigned long flags;
-       unsigned int query_cpu;
-
-       /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send 
-        * to an arbitrary mask, so I do a unicasts to each CPU instead. This 
-        * should be modified to do 1 message per cluster ID - mbligh
-        */ 
-
-       local_irq_save(flags);
-       for_each_possible_cpu(query_cpu) {
-               if (cpu_isset(query_cpu, mask)) {
-                       __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-                                             vector);
-               }
-       }
-       local_irq_restore(flags);
-}
-
 #include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
 
 /*
@@ -465,30 +339,3 @@ void flush_tlb_all(void)
 {
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
 }
-
-static int convert_apicid_to_cpu(int apic_id)
-{
-       int i;
-
-       for_each_possible_cpu(i) {
-               if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
-                       return i;
-       }
-       return -1;
-}
-
-int safe_smp_processor_id(void)
-{
-       int apicid, cpuid;
-
-       if (!boot_cpu_has(X86_FEATURE_APIC))
-               return 0;
-
-       apicid = hard_smp_processor_id();
-       if (apicid == BAD_APICID)
-               return 0;
-
-       cpuid = convert_apicid_to_cpu(apicid);
-
-       return cpuid >= 0 ? cpuid : 0;
-}