/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
cpumask_var_t xen_cpu_initialized_map;
struct xen_common_irq {
	int irq;
	char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/* Reschedule call back. */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
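/*
 * Run on the freshly started vCPU itself: finish per-cpu setup, register
 * clockevents and mark the CPU online before interrupts are enabled.
 */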
static void cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	this_cpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}
static void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_startup_entry(CPUHP_ONLINE);
}
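/*
 * Undo xen_smp_intr_init(): unbind this CPU's IPI/VIRQ handlers and free
 * their names.
 */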
static void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
	if (xen_hvm_domain())
		return;

	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
		per_cpu(xen_irq_work, cpu).irq = -1;
		kfree(per_cpu(xen_irq_work, cpu).name);
		per_cpu(xen_irq_work, cpu).name = NULL;
	}
}
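/*
 * Bind the per-cpu IPIs (reschedule, call-function, irq-work) and the debug
 * VIRQ to event-channel IRQ handlers for this CPU.
 */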
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	/*
	 * The IRQ worker on PVHVM goes through the native path and uses the
	 * native APIC mechanism.
	 */
	if (xen_hvm_domain())
		return 0;

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
				    cpu,
				    xen_irq_work_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_irq_work, cpu).irq = rc;
	per_cpu(xen_irq_work, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}
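/*
 * For domU, ask the hypervisor which VCPUs exist and mark them possible;
 * dom0's cpu maps come from ACPI and are trimmed in xen_filter_cpu_maps().
 */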
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	if (xen_initial_domain())
		return;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
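/*
 * Dom0 only: drop CPUs the hypervisor does not actually provide from the
 * possible/present maps (e.g. when booted with dom0_max_vcpus=X).
 */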
static void __init xen_filter_cpu_maps(void)
{
	int i, rc;
	unsigned int subtract = 0;

	if (!xen_initial_domain())
		return;

	num_processors = 0;
	disabled_cpus = 0;
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		} else {
			set_cpu_possible(i, false);
			set_cpu_present(i, false);
			subtract++;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/* This is akin to using 'nr_cpus' on the Linux command line.
	 * That is fine: with 'dom0_max_vcpus=X' we can have at most X
	 * VCPUs, while nr_cpu_ids may be greater than X.  This is
	 * normally not a problem, except when CPU hotplugging is
	 * involved: then there might be more than X CPUs in the guest,
	 * which will not work as there is no hypercall to expand the
	 * maximum number of VCPUs an already running guest has.  So
	 * cap nr_cpu_ids at X. */
	if (subtract)
		nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled. */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();
}
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}
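/*
 * Build the initial vcpu_guest_context (registers, GDT, callbacks, cr3) for
 * a secondary CPU and hand it to the hypervisor via VCPUOP_initialise.
 */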
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
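/*
 * Bring a secondary vCPU up: prepare its context, bind its IPIs and unpause
 * it, then wait for cpu_bringup() to mark it online.
 */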
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
static void xen_smp_cpus_done(unsigned int max_cpus)
{
}
#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
	while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
}
#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}
#endif
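/*
 * Runs on each CPU from xen_stop_other_cpus(): mark ourselves offline and
 * pull the vCPU down.
 */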
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
static void xen_stop_other_cpus(int wait)
{
	smp_call_function(stop_self, NULL, wait);
}
static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}
static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}
static void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
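/* Translate a native x86 IPI vector into the corresponding Xen IPI number. */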
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}
void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}
void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}
void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
	irq_enter();
	irq_work_run();
	inc_irq_stat(apic_irq_work_irqs);
	irq_exit();

	return IRQ_HANDLED;
}
static const struct smp_ops xen_smp_ops __initconst = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.stop_other_cpus = xen_stop_other_cpus,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};
void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}
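/*
 * PVHVM: CPU bringup goes through the native path; these hooks only add the
 * event-channel based IPIs on top when a vector callback is available.
 */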
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
}
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc;
	/*
	 * xen_smp_intr_init() needs to run before native_cpu_up()
	 * so that IPI vectors are set up on the booting CPU before
	 * it is marked online in native_cpu_up().
	 */
	rc = xen_smp_intr_init(cpu);
	WARN_ON(rc);
	if (!rc)
		rc = native_cpu_up(cpu, tidle);

	return rc;
}
static void xen_hvm_cpu_die(unsigned int cpu)
{
	xen_cpu_die(cpu);
	native_cpu_die(cpu);
}
void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.cpu_up = xen_hvm_cpu_up;
	smp_ops.cpu_die = xen_hvm_cpu_die;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}