/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"
/*
 * Compute the return address and emulate the branch, if required.
 * This function should only be called when in an active branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;
	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/*
		 * jr and jalr are in r_format format.
		 */
		switch (insn.r_format.func) {
			arch->gprs[insn.r_format.rd] = epc + 8;

			nextpc = arch->gprs[insn.r_format.rs];

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
		switch (insn.i_format.rt) {
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);

		/*
		 * These are unconditional and in j_format.
		 */
		arch->gprs[31] = instpc + 8;

		epc |= (insn.j_format.target << 2);

		/*
		 * These are conditional and in i_format.
		 */
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);

		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);

	case blez_op:		/* not really i_format */
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);

		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);

		/*
		 * And now the FPA/cp1 branch instructions.
		 */
		printk("%s: unsupported cop1_op\n", __func__);

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill_dsp:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
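
/*
 * Illustrative sketch (not part of the original file): the branch cases
 * above all compute the target as the address of the delay slot (epc + 4)
 * plus the sign-extended 16-bit immediate shifted left by 2. The helper
 * below is a hypothetical standalone restatement of that arithmetic.
 */
static inline unsigned long example_branch_target(unsigned long epc,
						  int16_t simmediate)
{
	/* the int16_t immediate sign-extends when widened to long */
	return epc + 4 + ((long)simmediate << 2);
}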
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
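
/*
 * Illustrative sketch (not part of the original file): a restatement of
 * why delta * count_hz above cannot overflow. With count_period set to
 * NSEC_PER_SEC * 2^32 / count_hz and the bias adjustment keeping
 * delta < count_period, the product stays below NSEC_PER_SEC * 2^32
 * (about 2^62). The helper and its names are hypothetical.
 */
static inline uint32_t example_ns_to_count(uint64_t delta_ns, uint64_t count_hz)
{
	/* safe while delta_ns < count_period: product < 1e9 * 2^32 < 2^64 */
	return (uint32_t)((delta_ns * count_hz) / 1000000000ULL);
}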
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and handles the
 * timer interrupt if it is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	ktime_t expires;
	int running;

	/* Is the hrtimer pending? */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	if (ktime_compare(now, expires) >= 0) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	/* Return the biased and scaled guest CP0_Count */
	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
}
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is disabled.
 *
 * Returns:	The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
				       uint32_t *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
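
/*
 * Illustrative sketch (not part of the original file): the
 * (u64)(uint32_t)(compare - count - 1) + 1 expression above computes the
 * number of CP0_Count ticks until CP0_Compare, mapping a distance of 0
 * to a full 2^32-tick period rather than an immediate expiry. The helper
 * is hypothetical.
 */
static inline uint64_t example_ticks_to_compare(uint32_t count,
						uint32_t compare)
{
	/* compare == count yields 0x100000000 ticks, not 0 */
	return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}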
/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu:	Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered which do not depend on the time
 * that the change occurs (in those cases kvm_mips_freeze_hrtimer() and
 * kvm_mips_resume_hrtimer() are used directly).
 *
 * It is guaranteed that no timer interrupts will be lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
{
	ktime_t now;
	uint32_t count;

	/*
	 * freeze_hrtimer takes care of timer interrupts <= count, and
	 * resume_hrtimer takes care of timer interrupts > count.
	 */
	now = kvm_mips_freeze_hrtimer(vcpu, &count);
	kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	kvm_mips_write_count(vcpu, 0);
}
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	uint32_t count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}
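
/*
 * Illustrative sketch (not part of the original file): how the bias
 * recalculation above keeps the guest-visible count continuous across a
 * frequency change. After count_bias is set to count - scale(now), a
 * read at the same instant (bias + scale(now)) still returns the old
 * count value. Names are hypothetical.
 */
static inline uint32_t example_rebias_read(uint32_t count, uint32_t scaled_now)
{
	uint32_t count_bias = count - scaled_now;	/* new bias */

	return count_bias + scaled_now;			/* == count */
}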
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* if unchanged, must just be an ack */
	if (kvm_read_c0_guest_compare(cop0) == compare)
		return;

	/* Update compare */
	kvm_write_c0_guest_compare(cop0, compare);

	/* Update timeout if count enabled */
	if (!kvm_mips_count_disabled(vcpu))
		kvm_mips_update_hrtimer(vcpu);
}
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	uint32_t count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(uint32_t)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then go off to user space to check if
		 * any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk
		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		     pc, index, kvm_read_c0_guest_entryhi(cop0),
		     kvm_read_c0_guest_entrylo0(cop0),
		     kvm_read_c0_guest_entrylo1(cop0),
		     kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
	     kvm_read_c0_guest_pagemask(cop0));

	return er;
}
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0),
	     kvm_read_c0_guest_entrylo1(cop0));

	return er;
}
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return er;
}
enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL) {
		return er;
	}

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/*  Read indexed TLB entry  */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/*  Write indexed  */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/*  Write random  */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
			break;
		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);
				/* Make sure that CU1 and NMI bits are never set */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;
				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;
		case dmtc_op:
			printk
			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			     vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}
			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}

				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			printk
			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			     vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}
done:
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}
enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);
		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	default:
		printk("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

	return er;
}
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
		break;

	default:
		printk("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	local_flush_icache_range(CKSEG0ADDR(pa), 32);

	return 0;
}
#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3
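
/*
 * Illustrative sketch (not part of the original file): the CACHE
 * opcode's rt field (bits 20:16) splits into the 3-bit operation and
 * 2-bit cache selector defined above, matching the decode done in
 * kvm_mips_emulate_cache() below. The helper is hypothetical.
 */
static inline void example_decode_cache_rt(uint32_t inst,
					   int32_t *cache, int32_t *op)
{
	*cache = (inst >> 16) & 0x3;	/* e.g. MIPS_CACHE_DCACHE */
	*op = (inst >> 18) & 0x7;	/* e.g. MIPS_CACHE_OP_HIT_INV */
}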
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug
		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		     arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {

		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
			goto skip_fault;
		}

		/*
		 * If address not in the guest TLB, then give the guest a
		 * fault, the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault an entry from the guest tlb to the
				 * shadow host TLB
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		kvm_debug
		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}
skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_debug
		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

	return er;

dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;
done:
	return er;
}
enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD) {
		opc += 1;
	}

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}
enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}
enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}
enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	int index;

	/*
	 * If address not in the guest TLB, then we are in trouble
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}
enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}
enum emulation_result
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return er;
}
enum emulation_result
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_RES_INST << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_BREAK << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
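
/*
 * Illustrative sketch (not part of the original file): using the masks
 * above to recognise an RDHWR instruction and extract its rt/rd fields,
 * exactly as kvm_mips_handle_ri() does below. The helper is
 * hypothetical.
 */
static inline int example_is_rdhwr(uint32_t inst, int *rt, int *rd)
{
	if ((inst & OPCODE) != SPEC3 || (inst & FUNC) != RDHWR)
		return 0;

	*rt = (inst & RT) >> 16;	/* destination GPR */
	*rd = (inst & RD) >> 11;	/* hardware register number */
	return 1;
}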
enum emulation_result
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
		   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		printk("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;
		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			arch->gprs[rt] = kvm_mips_read_count(vcpu);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
enum emulation_result
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;

	if (run->mmio.len > sizeof(*gpr)) {
		printk("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			/* unsigned load (LHU) must zero-extend */
			*gpr = *(uint16_t *) run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug
		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
		     vcpu->mmio_needed);

done:
	return er;
}
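
/*
 * Illustrative sketch (not part of the original file): how the
 * mmio_needed flag drives the extension behaviour in the switch above.
 * A flag of 2 marks a sign-extending load (LB/LH), 1 a zero-extending
 * one (LBU/LHU). The helper is hypothetical.
 */
static inline unsigned long example_extend_half(const void *data,
						int mmio_needed)
{
	if (mmio_needed == 2)				/* LH: sign-extend */
		return (unsigned long)*(const int16_t *)data;

	return (unsigned long)*(const uint16_t *)data;	/* LHU: zero-extend */
}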
static enum emulation_result
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		printk("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case T_COP_UNUSABLE:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_LD_MISS:
			/*
			 * If we are accessing guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: LD MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/*
			 * If we are accessing guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: ST MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			printk("%s: address error ST @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case T_ADDR_ERR_LD:
			printk("%s: address error LD @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL) {
		kvm_mips_emulate_exc(cause, opc, run, vcpu);
	}
	return er;
}
/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  (kvm_read_c0_guest_entryhi
					   (vcpu->arch.cop0) & ASID_MASK));
	if (index < 0) {
		if (exccode == T_TLB_LD_MISS) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == T_TLB_ST_MISS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			printk("%s: invalid exc code: %d\n", __func__, exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == T_TLB_ST_MISS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				printk("%s: invalid exc code: %d\n", __func__,
				       exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug
			    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
							     NULL);
		}
	}

	return er;
}