/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

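/*
 * A note on naming: the XEN_*_VECTOR constants used below are not APIC
 * vectors.  Each one is backed by a per-cpu event channel, bound in
 * xen_smp_intr_init() via bind_ipi_to_irqhandler(), and "sending an
 * IPI" means notifying the target vcpu's event channel.
 */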
cpumask_var_t xen_cpu_initialized_map;

/* Bound irq numbers for the Xen IPIs; start at -1 ("not bound yet") so
 * the error and teardown paths can tell whether a binding succeeded. */
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfunc_irq) = -1;
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq) = -1;
static DEFINE_PER_CPU(int, xen_irq_work) = -1;
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

static void __cpuinit cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	this_cpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}
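
/*
 * Entry point for a freshly created vcpu: cpu_initialize_context()
 * below points the new vcpu's eip here, so this never returns; after
 * bring-up the CPU drops into the idle loop.
 */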
static void __cpuinit cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}
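
/*
 * Bind the per-cpu IPI and debug event channels for @cpu.  Each handler
 * gets a kasprintf()'d name so it can be told apart in /proc/interrupts.
 * On any failure, everything bound so far is unbound via the fail path.
 */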
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu) = rc;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses
	 * the native IRQ mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(xen_resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	if (per_cpu(xen_callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	if (per_cpu(xen_debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
				       NULL);
	if (xen_hvm_domain())
		return rc;

	if (per_cpu(xen_irq_work, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

	return rc;
}

static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}

static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * That is fine, as with 'dom0_max_vcpus=X' we can only have up
	 * to X vcpus, while nr_cpu_ids may be greater than X.  This is
	 * normally not a problem, except with CPU hotplugging, where
	 * there could then be more than X CPUs in the guest - which
	 * cannot work, as there is no hypercall to expand the maximum
	 * number of vcpus of an already running guest.  So cap
	 * nr_cpu_ids at X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}

static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}
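
/*
 * Build the initial register and descriptor state for a new vcpu and
 * hand it to Xen with VCPUOP_initialise.  This only loads the context:
 * the vcpu does not run until xen_cpu_up() issues VCPUOP_up.
 */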
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
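
/*
 * Bring-up handshake: after VCPUOP_up the boot CPU spins until
 * cpu_bringup() on the new vcpu flips cpu_state to CPU_ONLINE,
 * yielding to the hypervisor between polls so the target vcpu can
 * actually get scheduled.
 */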
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	if (!xen_hvm_domain())
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}
#endif

static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}
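
/*
 * The exported xen_send_IPI_* helpers below take native vector numbers
 * and translate them with xen_map_vector().  A negative mapping means
 * the vector has no Xen equivalent, and the send is skipped (the
 * unknown vector having been logged by xen_map_vector()).
 */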
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();

	if (!(num_online_cpus() > 1))
		return;

	/* Note: @vector is unused here; every target gets the
	 * call-function-single IPI. */
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_smp_send_call_function_single_ipi(cpu);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}
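
/*
 * PVHVM: CPUs are brought up through the native path, but when vector
 * callbacks are available the IPI-related smp_ops are overridden below
 * so that IPIs still travel over Xen event channels.
 */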
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;
	rc = native_cpu_up(cpu, tidle);
	WARN_ON(xen_smp_intr_init(cpu));
	return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
	xen_cpu_die(cpu);
	native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}