4 #include <linux/cpumask.h>
5 #include <linux/init.h>
8 * We need the APIC definitions automatically as part of 'smp.h'
11 #include <asm/io_apic.h>
12 #include <asm/mpspec.h>
14 #include <asm/thread_info.h>
/* CPU bookkeeping masks maintained by the SMP bringup code (defined elsewhere). */
16 extern cpumask_t cpu_initialized;
17 extern cpumask_t cpu_callin_map;
/*
 * Run func(info) on every CPU in 'mask'.
 * NOTE(review): presumably 'wait' non-zero blocks until all target CPUs
 * have run the function -- confirm against the definition, not visible here.
 */
19 extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
20 void *info, int wait);
/*
 * Map an MPS/ACPI CPU index to the APIC ID the BIOS reported for it,
 * via the x86_bios_cpu_apicid per-CPU variable.
 * NOTE(review): the not-present fallback path and the braces of this
 * function are outside this view -- verify against the full file.
 */
22 static inline int cpu_present_to_apicid(int mps_cpu)
24 if (cpu_present(mps_cpu))
25 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
/* This CPU's number, read from the per-CPU PDA (no preemption check). */
32 #define raw_smp_processor_id() read_pda(cpunumber)
/* Logical CPU number -> physical APIC ID, via the x86_cpu_to_apicid per-CPU map. */
33 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
/*
 * Derive the CPU number from the stack pointer: mask %rsp with
 * CURRENT_MASK to reach the thread_info at the base of the stack.
 * NOTE(review): the macro's tail (reading the cpu field out of 'ti'
 * and closing the statement expression) is outside this view.
 */
35 #define stack_smp_processor_id() \
37 struct thread_info *ti; \
38 __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
43 * On x86 all CPUs are mapped 1:1 to the APIC space. This simplifies
44 * scheduling and IPI sending and compresses data structures.
/*
 * Number of CPUs participating in boot: the population count of
 * cpu_callout_map.  (Closing brace is outside this view.)
 */
46 static inline int num_booting_cpus(void)
48 return cpus_weight(cpu_callout_map);
51 #else /* CONFIG_SMP */
/* Uniprocessor fallbacks: one boot CPU, and it is always "CPU 0". */
53 extern unsigned int boot_cpu_id;
54 #define cpu_physical_id(cpu) boot_cpu_id
55 #define stack_smp_processor_id() 0
57 #endif /* !CONFIG_SMP */
59 #define safe_smp_processor_id() smp_processor_id()
/*
 * This CPU's logical APIC ID, read directly from the local APIC LDR
 * register via the fixed APIC_BASE mapping.  (Closing brace is outside
 * this view.)
 */
61 static __inline int logical_smp_processor_id(void)
63 /* we don't want to mark this access volatile - bad code generation */
64 return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
/*
 * This CPU's physical APIC ID, read directly from the local APIC ID
 * register via the fixed APIC_BASE mapping.  (Closing brace is outside
 * this view.)
 */
67 static inline int hard_smp_processor_id(void)
69 /* we don't want to mark this access volatile - bad code generation */
70 return GET_APIC_ID(*(u32 *)(APIC_BASE + APIC_ID));