/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
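
/*
 * Illustrative sketch (not part of the original file): the vcpu->mode /
 * vcpu->requests handshake above is a classic store-then-load pairing.
 * The requesting side (kvm_make_request() followed by a kick) conceptually
 * runs the mirror image, so at least one side always sees the other:
 *
 *	// requester (other CPU)		// entry path (above)
 *	set_bit(req, &vcpu->requests);		vcpu->mode = IN_GUEST_MODE;
 *	smp_mb();				smp_mb();
 *	if (vcpu->mode == IN_GUEST_MODE)	if (vcpu->requests)
 *		send IPI to vcpu;			handle requests;
 *
 * Without the barriers, either CPU could hoist its load above its store,
 * and a request could be posted with neither an IPI nor a request check.
 */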
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
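
/*
 * Illustrative sketch (guest side, not part of this file): the hypercalls
 * dispatched above use the ePAPR-style ABI, with the token in r11,
 * parameters in r3-r6, and the two return values in r3 (status) and r4.
 * Assuming the "sc"-based sequence advertised via KVM_PPC_GET_PVINFO, a
 * guest-side helper could look roughly like this (kvm_hypercall() is a
 * made-up name for the example):
 *
 *	static unsigned long kvm_hypercall(unsigned long token,
 *					   unsigned long p1,
 *					   unsigned long *out)
 *	{
 *		register unsigned long r0 asm("r0") = KVM_SC_MAGIC_R0;
 *		register unsigned long r11 asm("r11") = token;
 *		register unsigned long r3 asm("r3") = p1;
 *		register unsigned long r4 asm("r4") = 0;
 *
 *		asm volatile("sc" : "+r"(r3), "+r"(r4)
 *			     : "r"(r0), "r"(r11) : "memory");
 *		*out = r4;	// second return value (r2 above)
 *		return r3;	// EV_SUCCESS / EV_UNIMPLEMENTED
 *	}
 */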
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, false, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
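
/*
 * Illustrative note (not part of the original file): once a guest has
 * issued KVM_HC_PPC_MAP_MAGIC_PAGE, accesses hitting mp_pa are satisfied
 * straight from vcpu->arch.shared by the override above, so the guest can
 * read emulated registers without exiting, e.g. (guest-side sketch, layout
 * per struct kvm_vcpu_arch_shared):
 *
 *	struct kvm_vcpu_arch_shared *m = (void *)KVM_MAGIC_PAGE;
 *	unsigned long srr0 = m->srr0;	// plain load, no trap
 */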
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
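
/*
 * Illustrative sketch (userspace side, not part of this file): the values
 * computed above are consumed through KVM_CHECK_EXTENSION, where 0 means
 * "unsupported" and positive values are capability-specific (e.g. the
 * thread count for KVM_CAP_PPC_SMT):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int smt = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *	if (smt > 0)
 *		printf("HV mode, %d threads per subcore\n", smt);
 */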
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
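
/*
 * Illustrative note (not part of the original file): splitting the timer
 * into an hrtimer plus a tasklet is the usual pattern for work that is too
 * heavy for hardirq context. The subarch tasklet handler is expected to
 * look roughly like this (actual implementations live in book3s.c/booke.c):
 *
 *	void kvmppc_decrementer_func(unsigned long data)
 *	{
 *		struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 *
 *		kvmppc_core_queue_dec(vcpu);	// raise guest DEC interrupt
 *		kvm_vcpu_kick(vcpu);		// wake the vcpu if blocked
 *	}
 */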
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
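
/*
 * Illustrative sketch (userspace side, not part of this file): when the
 * handlers above return EMULATE_DO_MMIO, KVM_RUN returns to the VMM with
 * exit_reason == KVM_EXIT_MMIO and the access described in run->mmio.
 * The VMM services it and re-enters; kvm_arch_vcpu_ioctl_run() below then
 * completes any pending load from run->mmio.data:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_MMIO) {
 *			if (run->mmio.is_write)
 *				dev_write(run->mmio.phys_addr,
 *					  run->mmio.data, run->mmio.len);
 *			else
 *				dev_read(run->mmio.phys_addr,
 *					 run->mmio.data, run->mmio.len);
 *		}
 *	}
 *
 * dev_read()/dev_write() stand in for whatever device model the VMM uses.
 */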
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
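
/*
 * Illustrative sketch (userspace side, not part of this file): per-vcpu
 * capabilities handled above are enabled with KVM_ENABLE_CAP on the vcpu
 * fd, e.g. to put a Book3S vcpu into PAPR mode:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP(PPC_PAPR)");
 *
 * Note the kvmppc_sanity_check() tail above: enabling PAPR on anything but
 * a Book3S-64 vcpu makes the final check fail, so the ioctl returns EINVAL.
 */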
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
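
/*
 * Illustrative sketch (userspace side, not part of this file): a VMM
 * fetches this blob with KVM_PPC_GET_PVINFO and typically advertises it to
 * the guest through the device tree's /hypervisor node, so the guest
 * kernel can patch the four instructions into its hypercall stub:
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 *	// e.g. fdt_setprop(fdt, hyp, "hcall-instructions",
 *	//		    pvinfo.hcall, sizeof(pvinfo.hcall));
 */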
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
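
/*
 * Illustrative note (not part of the original file): hcall numbers are
 * multiples of 4 (hence the "hcall & 3" rejection and the hcall / 4 bitmap
 * index above). A VMM whitelists hypercalls per VM like this (H_CEDE is a
 * kernel-internal constant, shown for illustration):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { H_CEDE, 1 },	// args[0] = hcall, args[1] = enable
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */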
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}

out:
	return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
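
/*
 * Illustrative note (not part of the original file): subarch code sizes
 * the LPID pool once at module init and then takes one LPID per VM,
 * roughly:
 *
 *	kvmppc_init_lpid(KVMPPC_NR_LPIDS);	// once, at init
 *	...
 *	long lpid = kvmppc_alloc_lpid();	// per VM
 *	if (lpid < 0)
 *		return lpid;			// -ENOMEM when exhausted
 *	...
 *	kvmppc_free_lpid(lpid);			// at VM teardown
 */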
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}