/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>
#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
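/*
 * Note: asm/r4kcache.h is included with CONFIG_MIPS_MT temporarily undefined
 * so that the plain (non-MT) variants of the cache routines are pulled in;
 * the macro is restored immediately afterwards.
 */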
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
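/*
 * Sketch of the expected setup (done by the arch init code when the KVM
 * module loads; the right-hand names come from kvm_main.c and may differ
 * between kernel versions):
 *
 *	kvm_mips_gfn_to_pfn = gfn_to_pfn;
 *	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
 *	kvm_mips_is_error_pfn = is_error_noslot_pfn;
 */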
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.commpage_tlb;
}
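/*
 * Despite the name, the value above is the reserved host TLB *index* for the
 * commpage (it is written to CP0_Index by kvm_mips_handle_commpage_tlb_fault()
 * below), not an ASID.
 */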
/*
 * Structure defining a TLB entry data set.
 */
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long old_entryhi;
        unsigned long old_pagemask;
        struct kvm_mips_tlb tlb;
        unsigned long flags;
        int i;

        local_irq_save(flags);
        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        printk("HOST TLBs:\n");
        printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
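        /*
         * Read each TLB entry by hand: select it via CP0_Index, then
         * tlb_read() latches it into EntryHi/EntryLo0/EntryLo1/PageMask.
         */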
        for (i = 0; i < current_cpu_data.tlbsize; i++) {
                write_c0_index(i);
                mtc0_tlbw_hazard();

                tlb_read();
                tlbw_use_hazard();

                tlb.tlb_hi = read_c0_entryhi();
                tlb.tlb_lo0 = read_c0_entrylo0();
                tlb.tlb_lo1 = read_c0_entrylo1();
                tlb.tlb_mask = read_c0_pagemask();
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);

        local_irq_restore(flags);
}
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;
        printk("Guest TLBs:\n");
        printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                printk("TLB%c%3d Hi 0x%08lx ",
                       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
                       i, tlb.tlb_hi);
                printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
                       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo0 >> 3) & 7);
                printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
                       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
                       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
                       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
                       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
}
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
        if (kvm_mips_is_error_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}
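/*
 * guest_pmap[] caches the gfn -> pfn mapping, so each guest physical page is
 * resolved through kvm_mips_gfn_to_pfn() only once; later faults on the same
 * gfn take the early return above.
 */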
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        uint32_t offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;
        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }
        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }
        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
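/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * for gva = KVM_GUEST_KSEG0 + 0x12345678,
 *	gfn    = 0x12345678 >> 12 = 0x12345
 *	offset = 0x678
 *	hpa    = (guest_pmap[0x12345] << 12) + 0x678
 */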
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
                            unsigned long entrylo0, unsigned long entrylo1,
                            int flush_dcache_mask)
{
        unsigned long flags;
        unsigned long old_entryhi;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        /* Probe for an existing entry with this VPN2/ASID */
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        if (idx > current_cpu_data.tlbsize) {
                kvm_err("%s: Invalid Index: %d\n", __func__, idx);
                kvm_mips_dump_host_tlbs();
                local_irq_restore(flags);
                return -1;
        }

        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();

        /* A negative index means no match: write a random entry instead */
        if (idx < 0)
                tlb_write_random();
        else
                tlb_write_indexed();
        tlbw_use_hazard();
        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, idx, read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());
        /* Flush D-cache */
        if (flush_dcache_mask) {
                if (entrylo0 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page((entryhi & VPN2_MASK) &
                                              ~flush_dcache_mask);
                }
                if (entrylo1 & MIPS3_PG_V) {
                        ++vcpu->stat.flush_dcache_exits;
                        flush_data_cache_page(((entryhi & VPN2_MASK) &
                                               ~flush_dcache_mask) |
                                              (0x1 << PAGE_SHIFT));
                }
        }
        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        int even;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;
        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        even = !(gfn & 0x1);
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;

        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;
        if (even) {
                pfn0 = kvm->arch.guest_pmap[gfn];
                pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
        } else {
                pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
                pfn1 = kvm->arch.guest_pmap[gfn];
        }
        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
}
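/*
 * Note on the EntryLo bits above (standard MIPS EntryLo layout): bit 1 is V
 * (valid), bit 2 is D (dirty, i.e. writable), and bits 5..3 hold the cache
 * coherency attribute, where 3 means cacheable noncoherent. So
 * (0x3 << 3) | (1 << 2) | (0x1 << 1) maps the even/odd page pair cacheable,
 * writable and valid.
 */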
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        pfn_t pfn0, pfn1;
        unsigned long flags, old_entryhi = 0, vaddr = 0;
        unsigned long entrylo0 = 0, entrylo1 = 0;

        pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
        pfn1 = 0;
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (1 << 2) | (0x1 << 1);
        entrylo1 = 0;
        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        vaddr = badvaddr & (PAGE_MASK << 1);
        write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(entrylo0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        /* The commpage entry lives at a fixed, reserved index */
        write_c0_index(kvm_mips_get_commpage_asid(vcpu));
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
                  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
                  read_c0_entrylo0(), read_c0_entrylo1());
        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
                                         unsigned long *hpa0,
                                         unsigned long *hpa1)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        pfn_t pfn0, pfn1;

        if ((tlb->tlb_hi & VPN2_MASK) == 0) {
                pfn0 = 0;
                pfn1 = 0;
        } else {
                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                      >> PAGE_SHIFT) < 0)
                        return -1;

                pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                                            >> PAGE_SHIFT];
                pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                                            >> PAGE_SHIFT];
        }
        if (hpa0)
                *hpa0 = pfn0 << PAGE_SHIFT;

        if (hpa1)
                *hpa1 = pfn1 << PAGE_SHIFT;
        /* Get attributes from the Guest TLB */
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                   kvm_mips_get_kernel_asid(vcpu) :
                   kvm_mips_get_user_asid(vcpu));
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
                   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo0, tlb->tlb_lo1);

        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
}
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
                     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
                    (TLB_IS_GLOBAL(tlb[i]) ||
                     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
                        index = i;
                        break;
                }
        }
        /* Only log a hit; on a miss tlb[index] would be out of bounds */
        if (index >= 0)
                kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                          __func__, entryhi, index,
                          tlb[index].tlb_lo0, tlb[index].tlb_lo1);

        return index;
}
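/*
 * The "& ~tlb[i].tlb_mask" on both sides of the comparison makes the VPN2
 * match honour the entry's page size: PageMask bits that cover the page
 * offset are ignored, so entries using larger pages still match any address
 * inside their even/odd page pair.
 */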
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        volatile int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();
        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx > 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();

                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();

                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        if (idx > 0)
                kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
                          (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu),
                          idx);

        return 0;
}
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
        unsigned long flags, old_entryhi;

        if (index >= current_cpu_data.tlbsize)
                BUG();

        local_irq_save(flags);
        old_entryhi = read_c0_entryhi();

        write_c0_entryhi(UNIQUE_ENTRYHI(index));
        mtc0_tlbw_hazard();

        write_c0_index(index);
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);

        return 0;
}
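/*
 * UNIQUE_ENTRYHI(idx), used by the invalidation paths above (defined in
 * asm/tlb.h), evaluates to a CKSEG0-based VPN2 that can never be the target
 * of a real translation, giving every dead slot a distinct EntryHi so that
 * no two TLB entries ever conflict.
 */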
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
        unsigned long flags;
        unsigned long old_entryhi, entryhi;
        unsigned long old_pagemask;
        int entry = 0;
        int maxentry = current_cpu_data.tlbsize;

        local_irq_save(flags);
        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();

        /* Blast 'em all away. */
        for (entry = 0; entry < maxentry; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();

                if (skip_kseg0) {
                        tlb_read();
                        tlbw_use_hazard();

                        entryhi = read_c0_entryhi();

                        /* Don't blow away guest kernel entries */
                        if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                                continue;
                }
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                write_c0_entrylo0(0);
                mtc0_tlbw_hazard();
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                mtc0_tlbw_hazard();
        }
        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        mtc0_tlbw_hazard();
        tlbw_use_hazard();

        local_irq_restore(flags);
}
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                             struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
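/*
 * ASID allocation scheme: the low ASID_MASK bits of asid_cache(cpu) are the
 * hardware ASID, and the bits above them form a generation ("version")
 * counter. When the hardware bits wrap to zero, every previously handed-out
 * ASID may still be live in the TLB, so the TLB is flushed and the
 * generation is bumped; stale contexts are then caught by the
 * ASID_VERSION_MASK comparisons in kvm_arch_vcpu_load()/kvm_arch_vcpu_put()
 * below.
 */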
void kvm_local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry = 0;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);
}
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting
 * it if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if (((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                        vcpu->arch.guest_kernel_mm.context.asid[cpu];
                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                        vcpu->arch.guest_user_mm.context.asid[cpu];
                newasid++;

                kvm_info("[%d]: cpu_context: %#lx\n", cpu,
                         cpu_context(cpu, current->mm));
                kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
                kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                         vcpu->arch.guest_user_asid[cpu]);
        }
        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }
        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.preempt_entryhi &
                                         ASID_MASK);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so, then the pre-empted ASID
                 * is no longer valid and we need to set it to what it should
                 * be based on the mode of the Guest (Kernel/User)
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
                                                 ASID_MASK);
                        else
                                write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
                                                 ASID_MASK);
                        ehb();
                }
        }

        local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        uint32_t cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;
        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             ASID_VERSION_MASK)) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}
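/*
 * On put we only record the live EntryHi (and hence ASID) plus the CPU we
 * last ran on; if the host mm's ASID generation moved on while the guest was
 * running, the context is dropped so the host task gets a fresh ASID.
 */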
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags;
        uint32_t inst;
        int index;

        if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        index = kvm_mips_guest_tlb_lookup(vcpu,
                                        ((unsigned long) opc & VPN2_MASK) |
                                        (kvm_read_c0_guest_entryhi(cop0) &
                                         ASID_MASK));
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu,
                                        read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                        &vcpu->arch.guest_tlb[index],
                                        NULL, NULL);
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                (unsigned long) opc);
                inst = *(uint32_t *) CKSEG0ADDR(paddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);