/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"
/*
 * Compute the return address and emulate the branch, if required.
 * This function should be called only when a branch delay slot is active.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long nextpc = KVM_INVALID_INST;

	/* Read the instruction */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;
	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
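		/*
		 * Illustrative target computation (made-up values): from epc
		 * 0x80123450 the upper four bits (0x8) are kept, and a 26-bit
		 * target field of 0x4000 shifted left by 2 gives 0x80010000,
		 * i.e. the jump lands within the current 256 MB region.
		 */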
		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
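		/*
		 * Worked example of the relative branches above (illustrative
		 * values): a taken branch at epc 0x80001000 with simmediate
		 * 0x10 targets 0x80001000 + 4 + (0x10 << 2) = 0x80001044; the
		 * signed offset is scaled to instruction words and added to
		 * the delay-slot address.
		 */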
		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by the guest CP0_Cause.DC
 *		bit, or 0 otherwise (in which case the CP0_Count timer is
 *		running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return kvm_read_c0_guest_cause(cop0) & CAUSEF_DC;
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow
	 * since at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
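/*
 * Worked instance of the bound above (illustrative, assuming the 100 MHz
 * count_hz chosen by kvm_mips_init_count()): count_period = 10^9 * 2^32 /
 * 10^8 ns, so at the boundary delta * count_hz = 10^9 * 2^32, roughly
 * 4.3 * 10^18, which fits comfortably in the unsigned 64-bit intermediate
 * (max ~1.8 * 10^19).
 */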
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and handles the
 * case where the timer interrupt is due but hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
	ktime_t expires;
	int running;

	/* Is the hrtimer pending? */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	if (ktime_compare(now, expires) >= 0) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	/* Return the biased and scaled guest CP0_Count */
	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new count.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
				       uint32_t *count)
	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
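/*
 * Example of the wrap handling above (illustrative values): with
 * compare = 0x10 and count = 0xfffffff0, (u64)(uint32_t)(compare - count - 1)
 * + 1 gives 0x20 ticks until expiry. With compare == count the 32-bit
 * subtraction yields 0xffffffff, so delta becomes 2^32 ticks, i.e. a full
 * CP0_Count period rather than an immediate expiry.
 */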
/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu:	Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered which do not depend on the time
 * that the change occurs (in those cases kvm_mips_freeze_hrtimer() and
 * kvm_mips_resume_hrtimer() are used directly).
 *
 * It is guaranteed that no timer interrupts will be lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
	ktime_t now;
	uint32_t count;

	/*
	 * kvm_mips_freeze_hrtimer() takes care of timer interrupts <= count,
	 * and kvm_mips_resume_hrtimer() takes care of timer interrupts >
	 * count.
	 */
	now = kvm_mips_freeze_hrtimer(vcpu, &count);
	kvm_mips_resume_hrtimer(vcpu, now, count);
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = ktime_get();
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at count 0 */
	kvm_mips_write_count(vcpu, 0);
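/*
 * For illustration: with the 100 MHz frequency chosen above, count_period
 * works out to (10^9 << 32) / 10^8 ns, so the 32-bit CP0_Count wraps
 * roughly every 42.9 seconds of guest time.
 */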
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* if unchanged, must just be an ack */
	if (kvm_read_c0_guest_compare(cop0) == compare)
		return;

	/* Update compare */
	kvm_write_c0_guest_compare(cop0, compare);

	/* Update timeout if count enabled */
	if (!kvm_mips_count_disabled(vcpu))
		kvm_mips_update_hrtimer(vcpu);
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC has
 * been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	kvm_mips_count_disable(vcpu);
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled, potentially before even returning, so the
 * caller should be careful with ordering of CP0_Cause modifications so as
 * not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
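/*
 * Design note: adding count_period to the stored expiry rather than to the
 * current time keeps expiries on exact period boundaries, so a callback
 * that runs late does not make the guest timer drift.
 */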
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so
 * that we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk
		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		     pc, index, kvm_read_c0_guest_entryhi(cop0),
		     kvm_read_c0_guest_entrylo0(cop0),
		     kvm_read_c0_guest_entrylo1(cop0),
		     kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it.
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
	     kvm_read_c0_guest_pagemask(cop0));
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it.
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0),
	     kvm_read_c0_guest_entrylo1(cop0));
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL) {
		return er;
	}

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;
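	/*
	 * Field decode example (illustrative): "mtc0 t0, $9" assembles to
	 * 0x40884800, which decodes as opcode COP0 (0x10), copz MT (0x04),
	 * rt = 8 (t0), rd = 9 (CP0_Count) and sel = 0.
	 */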
	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;

		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {

					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);
				/* Make sure that CU1 and NMI bits are never set */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;
				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			printk
			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			     vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}
			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets,
				 * so SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;

		default:
			printk
			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			     vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances.
	 */

	return er;
enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;
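	/*
	 * Decode example (illustrative): "sw t0, 4(a0)" is 0xac880004, i.e.
	 * op = 0x2b (sw), base = 4 (a0), rt = 8 (t0), offset = 4. Note that
	 * the MMIO address below comes from host_cp0_badvaddr rather than
	 * being recomputed from base/offset.
	 */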
	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);
		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	default:
		printk("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
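		/*
		 * Note: mmio_needed doubles as a sign-extension flag here and
		 * in the byte case below; kvm_mips_complete_mmio_load()
		 * treats 2 as sign-extend (lh/lb) and 1 as zero-extend
		 * (lhu/lbu).
		 */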
		break;

	case lb_op:
	case lbu_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
		break;

	default:
		printk("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;
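	/*
	 * Illustrative mapping (assuming 4 KiB pages, made-up numbers): va
	 * 0x00401234 gives gfn 0x401 and offset 0x234, so with
	 * guest_pmap[0x401] == 0x8f2 the flush below targets
	 * CKSEG0ADDR(0x8f2234).
	 */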
	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	local_flush_icache_range(CKSEG0ADDR(pa), 32);
#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3
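/*
 * Decode example (illustrative): the 5-bit CACHE op field 0x15
 * (Hit_Writeback_Inv_D) splits into cache = 0x15 & 0x3 = MIPS_CACHE_DCACHE
 * and op = 0x15 >> 2 = MIPS_CACHE_OP_FILL_WB_INV, matching the shifts and
 * masks used in kvm_mips_emulate_cache() below.
 */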
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug
		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		     arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
			goto skip_fault;
		}

		/*
		 * If address not in the guest TLB, then give the guest a
		 * fault, the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			}
			/*
			 * We fault an entry from the guest tlb to the shadow
			 * host TLB
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
							     NULL, NULL);
		}
	} else {
		printk
		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}
skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk
		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

done:
	return er;

dont_update_pc:
	vcpu->arch.pc = curr_pc;
enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD) {
		opc += 1;
	}

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lh_op:
	case lhu_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;

	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}
enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
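		/*
		 * With EXL clear this is delivered as a TLB refill, which the
		 * architecture routes through the special vector at offset
		 * 0x0 rather than the general exception vector at 0x180 used
		 * in the EXL case below.
		 */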
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);
enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);
enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);
enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;

	/*
	 * If address not in the guest TLB, then we are in trouble
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);
enum emulation_result
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
enum emulation_result
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_RES_INST << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}
enum emulation_result
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_BREAK << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		printk("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
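/*
 * Decode example (illustrative): "rdhwr t0, $29" (the userlocal register)
 * assembles to 0x7c08e83b, so (inst & OPCODE) == SPEC3 and
 * (inst & FUNC) == RDHWR, with rd = 29 and rt = 8 extracted via the RD and
 * RT masks in kvm_mips_handle_ri() below.
 */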
enum emulation_result
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
		   struct kvm_run *run, struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		printk("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			arch->gprs[rt] = kvm_mips_read_count(vcpu);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			}
			break;
		case 29:	/* UserLocal */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;
		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
enum emulation_result
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;

	if (run->mmio.len > sizeof(*gpr)) {
		printk("Bad MMIO length: %d\n", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			*gpr = *(uint16_t *) run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug
		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
		     vcpu->mmio_needed);
static enum emulation_result
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		printk("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}
enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case T_COP_UNUSABLE:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_LD_MISS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: LD MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: ST MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			printk("%s: address error ST @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			} else
				er = EMULATE_PRIV_FAIL;
			break;
		case T_ADDR_ERR_LD:
			printk("%s: address error LD @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			} else
				er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL) {
		kvm_mips_emulate_exc(cause, opc, run, vcpu);
	}
/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in
 *     this case we pass on the fault to the guest kernel and let it handle
 *     it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there
	 * then send the guest an exception. The guest exc handler should
	 * then inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  (kvm_read_c0_guest_entryhi
					   (vcpu->arch.cop0) & ASID_MASK));
	if (index < 0) {
		if (exccode == T_TLB_LD_MISS) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == T_TLB_ST_MISS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			printk("%s: invalid exc code: %d\n", __func__, exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest.
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == T_TLB_ST_MISS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				printk("%s: invalid exc code: %d\n", __func__,
				       exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug
			    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);

			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,