 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * KVM/MIPS: Instruction/Exception emulation
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#include "interrupt.h"
 * Compute the return address and do the branch simulation, if required.
 * This function should only be called when the instruction being emulated
 * is in a branch delay slot.
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
unsigned int dspcontrol;
union mips_instruction insn;
struct kvm_vcpu_arch *arch = &vcpu->arch;
long nextpc = KVM_INVALID_INST;
/* Read the instruction */
insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
if (insn.word == KVM_INVALID_INST)
return KVM_INVALID_INST;
switch (insn.i_format.opcode) {
/* jr and jalr are in r_format format. */
switch (insn.r_format.func) {
arch->gprs[insn.r_format.rd] = epc + 8;
nextpc = arch->gprs[insn.r_format.rs];
 * This group contains:
 * bltz_op, bgez_op, bltzl_op, bgezl_op,
 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
switch (insn.i_format.rt) {
if ((long)arch->gprs[insn.i_format.rs] < 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
if ((long)arch->gprs[insn.i_format.rs] >= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
arch->gprs[31] = epc + 8;
if ((long)arch->gprs[insn.i_format.rs] < 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
arch->gprs[31] = epc + 8;
if ((long)arch->gprs[insn.i_format.rs] >= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
dspcontrol = rddsp(0x01);
if (dspcontrol >= 32)
epc = epc + 4 + (insn.i_format.simmediate << 2);
/* These are unconditional and in j_format. */
arch->gprs[31] = instpc + 8;
epc |= (insn.j_format.target << 2);
/* These are conditional and in i_format. */
if (arch->gprs[insn.i_format.rs] ==
arch->gprs[insn.i_format.rt])
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (arch->gprs[insn.i_format.rs] !=
arch->gprs[insn.i_format.rt])
epc = epc + 4 + (insn.i_format.simmediate << 2);
case blez_op: /* not really i_format */
/* rt field assumed to be zero */
if ((long)arch->gprs[insn.i_format.rs] <= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
/* rt field assumed to be zero */
if ((long)arch->gprs[insn.i_format.rs] > 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
/* And now the FPA/cp1 branch instructions. */
kvm_err("%s: unsupported cop1_op\n", __func__);
kvm_err("%s: unaligned epc\n", __func__);
kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
unsigned long branch_pc;
enum emulation_result er = EMULATE_DONE;
if (cause & CAUSEF_BD) {
branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
if (branch_pc == KVM_INVALID_INST) {
vcpu->arch.pc = branch_pc;
kvm_debug("BD update_pc(): New PC: %#lx\n",
kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu: Virtual CPU.
 * Returns: 1 if the CP0_Count timer is disabled by either the guest
 * CP0_Cause.DC bit or the count_ctl.DC bit.
 * 0 otherwise (in which case CP0_Count timer is running).
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
now_ns = ktime_to_ns(now);
delta = now_ns + vcpu->arch.count_dyn_bias;
if (delta >= vcpu->arch.count_period) {
/* If delta is out of safe range the bias needs adjusting */
periods = div64_s64(now_ns, vcpu->arch.count_period);
vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
/* Recalculate delta with new bias */
delta = now_ns + vcpu->arch.count_dyn_bias;
 * We've ensured that:
 * delta < count_period
 * Therefore the intermediate delta*count_hz will never overflow since
 * at the boundary condition:
 * delta = count_period
 * delta = NSEC_PER_SEC * 2^32 / count_hz
 * delta * count_hz = NSEC_PER_SEC * 2^32
return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
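/*
 * Worked example (editorial illustration, not from the original source):
 * with the default count_hz of 100 MHz set by kvm_mips_init_count(),
 * count_period = (NSEC_PER_SEC << 32) / count_hz is roughly 42.9 seconds.
 * At the boundary delta == count_period, delta * count_hz ==
 * NSEC_PER_SEC * 2^32 (about 4.3e18), comfortably below U64_MAX
 * (about 1.8e19), so the 64-bit multiply above cannot overflow.
 */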
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu: Virtual CPU.
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 * Returns: Effective monotonic ktime for CP0_Count.
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
return vcpu->arch.count_resume;
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu: Virtual CPU.
 * @now: Kernel time to read CP0_Count at.
 * Returns the current guest CP0_Count register at time @now, and handles any
 * timer interrupt that is pending but has not been handled yet.
 * Returns: The current value of the guest CP0_Count register.
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t expires, threshold;
uint32_t count, compare;
/* Calculate the biased and scaled guest CP0_Count */
count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
compare = kvm_read_c0_guest_compare(cop0);
 * Find whether CP0_Count has reached the closest timer interrupt. If
 * not, we shouldn't inject it.
if ((int32_t)(count - compare) < 0)
 * The CP0_Count we're going to return has already reached the closest
 * timer interrupt. Quickly check if it really is a new interrupt by
 * looking at whether the interval until the hrtimer expiry time is
 * less than 1/4 of the timer period.
expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
if (ktime_before(expires, threshold)) {
 * Cancel it while we handle it so there's no chance of
 * interference with the timeout handler.
running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* Nothing should be waiting on the timeout */
kvm_mips_callbacks->queue_timer_int(vcpu);
 * Restart the timer if it was running based on the expiry time
 * we read, so that we don't push it back 2 periods.
expires = ktime_add_ns(expires,
vcpu->arch.count_period);
hrtimer_start(&vcpu->arch.comparecount_timer, expires,
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu: Virtual CPU.
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is disabled.
 * Returns: The current guest CP0_Count value.
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
/* If count disabled just read static copy of count */
if (kvm_mips_count_disabled(vcpu))
return kvm_read_c0_guest_count(cop0);
return kvm_mips_read_count_running(vcpu, ktime_get());
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu: Virtual CPU.
 * @count: Output pointer for CP0_Count value at point of freeze.
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new count.
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 * Returns: The ktime at the point of freeze.
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
/* stop hrtimer before finding time */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* find count at this point and handle pending hrtimer */
*count = kvm_mips_read_count_running(vcpu, now);
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu: Virtual CPU.
 * @now: ktime at point of resume.
 * @count: CP0_Count at point of resume.
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
ktime_t now, uint32_t count)
struct mips_coproc *cop0 = vcpu->arch.cop0;
/* Calculate timeout (wrap 0 to 2^32) */
compare = kvm_read_c0_guest_compare(cop0);
delta = (u64)(uint32_t)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
expire = ktime_add_ns(now, delta);
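/*
 * Editorial note: the "wrap 0 to 2^32" arithmetic above means that when
 * compare == count the timeout is a full 2^32 ticks away rather than
 * zero: (u64)(uint32_t)(compare - count - 1) + 1 == (u64)0xffffffff + 1
 * == 1ULL << 32. An interrupt for CP0_Compare exactly at @count is
 * assumed to have been dealt with already (see the kernel-doc above).
 */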
/* Update hrtimer to use new timeout */
hrtimer_cancel(&vcpu->arch.comparecount_timer);
hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu: Virtual CPU.
 * @count: Guest CP0_Count value to set.
 * Sets the CP0_Count value and updates the timer accordingly.
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
struct mips_coproc *cop0 = vcpu->arch.cop0;
now = kvm_mips_count_time(vcpu);
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
if (kvm_mips_count_disabled(vcpu))
/* The timer's disabled, adjust the static count */
kvm_write_c0_guest_count(cop0, count);
kvm_mips_resume_hrtimer(vcpu, now, count);
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu: Virtual CPU.
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
vcpu->arch.count_hz = 100*1000*1000;
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
vcpu->arch.count_hz);
vcpu->arch.count_dyn_bias = 0;
kvm_mips_write_count(vcpu, 0);
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu: Virtual CPU.
 * @count_hz: Frequency of CP0_Count timer in Hz.
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 * Returns: -EINVAL if @count_hz is out of range.
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
struct mips_coproc *cop0 = vcpu->arch.cop0;
/* ensure the frequency is in a sensible range... */
if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
/* ... and has actually changed */
if (vcpu->arch.count_hz == count_hz)
/* Safely freeze timer so we can keep it continuous */
dc = kvm_mips_count_disabled(vcpu);
now = kvm_mips_count_time(vcpu);
count = kvm_read_c0_guest_count(cop0);
now = kvm_mips_freeze_hrtimer(vcpu, &count);
/* Update the frequency */
vcpu->arch.count_hz = count_hz;
vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
vcpu->arch.count_dyn_bias = 0;
/* Calculate adjusted bias so dynamic count is unchanged */
vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
/* Update and resume hrtimer */
kvm_mips_resume_hrtimer(vcpu, now, count);
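/*
 * Editorial note: after the frequency change the same (now, count) pair
 * is re-derived under the new count_hz and count_period, so count_bias
 * absorbs the difference and the guest observes a continuous CP0_Count
 * across the update, as the kernel-doc above promises.
 */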
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu: Virtual CPU.
 * @compare: New CP0_Compare value.
 * @ack: Whether to acknowledge timer interrupt.
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
struct mips_coproc *cop0 = vcpu->arch.cop0;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
/* if unchanged, must just be an ack */
if (old_compare == compare) {
kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_write_c0_guest_compare(cop0, compare);
/* freeze_hrtimer() takes care of timer interrupts <= count */
dc = kvm_mips_count_disabled(vcpu);
now = kvm_mips_freeze_hrtimer(vcpu, &count);
kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_write_c0_guest_compare(cop0, compare);
/* resume_hrtimer() takes care of timer interrupts > count */
kvm_mips_resume_hrtimer(vcpu, now, count);
 * kvm_mips_count_disable() - Disable count.
 * @vcpu: Virtual CPU.
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 * Returns: The time that the timer was stopped.
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
hrtimer_cancel(&vcpu->arch.comparecount_timer);
/* Set the static count from the dynamic count, handling pending TI */
count = kvm_mips_read_count_running(vcpu, now);
kvm_write_c0_guest_count(cop0, count);
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu: Virtual CPU.
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 * Assumes CP0_Cause.DC is clear (count enabled).
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
kvm_mips_count_disable(vcpu);
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu: Virtual CPU.
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 * Assumes CP0_Cause.DC is set (count disabled).
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
 * Set the dynamic count to match the static count.
 * This starts the hrtimer if count_ctl.DC allows it.
 * Otherwise it conveniently updates the biases.
count = kvm_read_c0_guest_count(cop0);
kvm_mips_write_count(vcpu, count);
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu: Virtual CPU.
 * @count_ctl: Count control register new value.
 * Set the count control KVM register. The timer is updated accordingly.
 * Returns: -EINVAL if reserved bits are set.
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
struct mips_coproc *cop0 = vcpu->arch.cop0;
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
uint32_t count, compare;
/* Only allow defined bits to be changed */
if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
/* Apply new value */
vcpu->arch.count_ctl = count_ctl;
/* Master CP0_Count disable */
if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
/* Is CP0_Cause.DC already disabling CP0_Count? */
if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
/* Just record the current time */
vcpu->arch.count_resume = ktime_get();
} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
/* disable timer and record current time */
vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
 * Calculate timeout relative to static count at resume
 * time (wrap 0 to 2^32).
count = kvm_read_c0_guest_count(cop0);
compare = kvm_read_c0_guest_compare(cop0);
delta = (u64)(uint32_t)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC,
vcpu->arch.count_hz);
expire = ktime_add_ns(vcpu->arch.count_resume, delta);
/* Handle pending interrupt */
if (ktime_compare(now, expire) >= 0)
/* Nothing should be waiting on the timeout */
kvm_mips_callbacks->queue_timer_int(vcpu);
/* Resume hrtimer without changing bias */
count = kvm_mips_read_count_running(vcpu, now);
kvm_mips_resume_hrtimer(vcpu, now, count);
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu: Virtual CPU.
 * @count_resume: Count resume register new value.
 * Set the count resume KVM register.
 * Returns: -EINVAL if out of valid range (0..now).
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
 * It doesn't make sense for the resume time to be in the future, as it
 * would be possible for the next interrupt to be more than a full
 * period in the future.
if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
vcpu->arch.count_resume = ns_to_ktime(count_resume);
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu: Virtual CPU.
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
/* Add the Count period to the current expiry time */
hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
vcpu->arch.count_period);
return HRTIMER_RESTART;
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
kvm_read_c0_guest_epc(cop0));
kvm_clear_c0_guest_status(cop0, ST0_EXL);
vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
kvm_clear_c0_guest_status(cop0, ST0_ERL);
vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
vcpu->arch.pending_exceptions);
++vcpu->stat.wait_exits;
trace_kvm_exit(vcpu, WAIT_EXITS);
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
 * If we are runnable, then definitely go off to user space to
 * check if any I/O interrupts are pending.
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change.
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
uint32_t pc = vcpu->arch.pc;
kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
int index = kvm_read_c0_guest_index(cop0);
struct kvm_mips_tlb *tlb = NULL;
uint32_t pc = vcpu->arch.pc;
if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
kvm_debug("%s: illegal index: %d\n", __func__, index);
kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
kvm_read_c0_guest_pagemask(cop0));
index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
tlb = &vcpu->arch.guest_tlb[index];
 * Probe the shadow host TLB for the entry being overwritten; if one
 * matches, invalidate it.
kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
kvm_read_c0_guest_pagemask(cop0));
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb *tlb = NULL;
uint32_t pc = vcpu->arch.pc;
get_random_bytes(&index, sizeof(index));
index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
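/*
 * Editorial note: masking with KVM_MIPS_GUEST_TLB_SIZE - 1 only yields an
 * in-range index because KVM_MIPS_GUEST_TLB_SIZE is assumed to be a power
 * of two; contrast the modulo used to sanitise an out-of-range index in
 * kvm_mips_emul_tlbwi() above.
 */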
tlb = &vcpu->arch.guest_tlb[index];
 * Probe the shadow host TLB for the entry being overwritten; if one
 * matches, invalidate it.
kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0));
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
long entryhi = kvm_read_c0_guest_entryhi(cop0);
uint32_t pc = vcpu->arch.pc;
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
kvm_write_c0_guest_index(cop0, index);
kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu: Virtual CPU.
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
unsigned int mask = 0;
/* Permit FPU to be present if FPU is supported */
if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
mask |= MIPS_CONF1_FP;
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu: Virtual CPU.
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
/* Config4 is optional */
unsigned int mask = MIPS_CONF_M;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
mask |= MIPS_CONF3_MSA;
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu: Virtual CPU.
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
/* Config5 is optional */
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu: Virtual CPU.
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
unsigned int mask = 0;
/* Permit MSAEn changes if MSA supported and enabled */
if (kvm_mips_guest_has_msa(&vcpu->arch))
mask |= MIPS_CONF5_MSAEN;
 * Permit guest FPU mode changes if FPU is enabled and the relevant
 * feature exists according to FIR register.
if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
mask |= MIPS_CONF5_FRE;
/* We don't support UFR or UFE */
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
uint32_t cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
int32_t rt, rd, copz, sel, co_bit, op;
uint32_t pc = vcpu->arch.pc;
unsigned long curr_pc;
 * Update PC and hold onto current PC in case there is
 * an error and we want to roll back the PC.
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
copz = (inst >> 21) & 0x1f;
rt = (inst >> 16) & 0x1f;
rd = (inst >> 11) & 0x1f;
co_bit = (inst >> 25) & 1;
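/*
 * Illustrative decode (hand-computed, treat as an assumption): the word
 * 0x40084800, i.e. "mfc0 t0, c0_count", splits into copz = 0 (MFC0),
 * rt = 8 (t0), rd = 9 (MIPS_CP0_COUNT), sel = 0 and co_bit = 0, which
 * routes it to the Count read handling below.
 */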
case tlbr_op: /* Read indexed TLB entry */
er = kvm_mips_emul_tlbr(vcpu);
case tlbwi_op: /* Write indexed */
er = kvm_mips_emul_tlbwi(vcpu);
case tlbwr_op: /* Write random */
er = kvm_mips_emul_tlbwr(vcpu);
case tlbp_op: /* TLB Probe */
er = kvm_mips_emul_tlbp(vcpu);
kvm_err("!!!COP0_RFE!!!\n");
er = kvm_mips_emul_eret(vcpu);
goto dont_update_pc;
er = kvm_mips_emul_wait(vcpu);
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
pc, rd, sel, rt, vcpu->arch.gprs[rt]);
vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
if ((rd == MIPS_CP0_TLB_INDEX)
&& (vcpu->arch.gprs[rt] >=
KVM_MIPS_GUEST_TLB_SIZE)) {
kvm_err("Invalid TLB Index: %ld",
vcpu->arch.gprs[rt]);
#define C0_EBASE_CORE_MASK 0xff
if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
/* Preserve CORE number */
kvm_change_c0_guest_ebase(cop0,
~(C0_EBASE_CORE_MASK),
vcpu->arch.gprs[rt]);
kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
vcpu->arch.gprs[rt] & ASID_MASK;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
((kvm_read_c0_guest_entryhi(cop0) &
ASID_MASK) != nasid)) {
kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
kvm_read_c0_guest_entryhi(cop0)
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]);
/* Are we writing to COUNT? */
else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
pc, kvm_read_c0_guest_compare(cop0),
vcpu->arch.gprs[rt]);
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
kvm_mips_write_compare(vcpu,
vcpu->arch.gprs[rt],
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
unsigned int old_val, val, change;
old_val = kvm_read_c0_guest_status(cop0);
val = vcpu->arch.gprs[rt];
change = val ^ old_val;
/* Make sure that the NMI bit is never set */
 * Don't allow CU1 or FR to be set unless FPU
 * capability enabled and exists in guest
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
 * Also don't allow FR to be set if host doesn't
 * support it.
if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
/* Handle changes in FPU mode */
 * FPU and Vector register state is made
 * UNPREDICTABLE by a change of FR, so don't
 * even bother saving it.
if (change & ST0_FR)
 * If MSA state is already live, it is undefined
 * how it interacts with FR=0 FPU state, and we
 * don't want to hit reserved instruction
 * exceptions trying to save the MSA state later
 * when CU=1 && FR=1, so play it safe and save
 * it first.
if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
 * Propagate CU1 (FPU enable) changes
 * immediately if the FPU context is already
 * loaded. When disabling we leave the context
 * loaded so it can be quickly enabled again in
 * the near future.
if (change & ST0_CU1 &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
change_c0_status(ST0_CU1, val);
kvm_write_c0_guest_status(cop0, val);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 * If FPU present, we need CU1/FR bits to take
 * effect fairly soon.
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
kvm_mips_trans_mtc0(inst, opc, vcpu);
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
unsigned int old_val, val, change, wrmask;
old_val = kvm_read_c0_guest_config5(cop0);
val = vcpu->arch.gprs[rt];
/* Only a few bits are writable in Config5 */
wrmask = kvm_mips_config5_wrmask(vcpu);
change = (val ^ old_val) & wrmask;
val = old_val ^ change;
/* Handle changes in FPU/MSA modes */
 * Propagate FRE changes immediately if the FPU
 * context is already loaded.
if (change & MIPS_CONF5_FRE &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
 * Propagate MSAEn changes immediately if the
 * MSA context is already loaded. When disabling
 * we leave the context loaded so it can be
 * quickly enabled again in the near future.
if (change & MIPS_CONF5_MSAEN &&
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
change_c0_config5(MIPS_CONF5_MSAEN,
kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
uint32_t old_cause, new_cause;
old_cause = kvm_read_c0_guest_cause(cop0);
new_cause = vcpu->arch.gprs[rt];
/* Update R/W bits */
kvm_change_c0_guest_cause(cop0, 0x08800300,
/* DC bit enabling/disabling timer? */
if ((old_cause ^ new_cause) & CAUSEF_DC) {
if (new_cause & CAUSEF_DC)
kvm_mips_count_disable_cause(vcpu);
kvm_mips_count_enable_cause(vcpu);
cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mtc0(inst, opc, vcpu);
kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
rd, sel, cop0->reg[rd][sel]);
kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
vcpu->arch.pc, rt, rd, sel);
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[MIPS_CP0_STATUS][0]++;
vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0);
kvm_debug("[%#lx] mfmcz_op: EI\n",
kvm_set_c0_guest_status(cop0, ST0_IE);
kvm_debug("[%#lx] mfmcz_op: DI\n",
kvm_clear_c0_guest_status(cop0, ST0_IE);
cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
 * We don't support any shadow register sets, so
 * SRSCtl[PSS] == SRSCtl[CSS] == 0.
kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
vcpu->arch.gprs[rt]);
vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
vcpu->arch.pc, copz);
/* Roll back the PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
 * This is for special instructions whose emulation
 * updates the PC, so do not overwrite the PC under
 * any circumstances.
enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DO_MMIO;
int32_t op, base, rt, offset;
void *data = run->mmio.data;
unsigned long curr_pc;
 * Update PC and hold onto current PC in case there is
 * an error and we want to roll back the PC.
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
rt = (inst >> 16) & 0x1f;
base = (inst >> 21) & 0x1f;
offset = inst & 0xffff;
op = (inst >> 26) & 0x3f;
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(u8 *) data = vcpu->arch.gprs[rt];
kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(uint32_t *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(uint32_t *) data);
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
*(uint16_t *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
vcpu->arch.gprs[rt], *(uint16_t *) data);
kvm_err("Store not yet supported");
/* Roll back the PC if emulation was unsuccessful */
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DO_MMIO;
int32_t op, base, rt, offset;
rt = (inst >> 16) & 0x1f;
base = (inst >> 21) & 0x1f;
offset = inst & 0xffff;
op = (inst >> 26) & 0x3f;
vcpu->arch.pending_load_cause = cause;
vcpu->arch.io_gpr = rt;
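/*
 * Note (editorial): the load completes asynchronously. Once userspace
 * has filled run->mmio.data, kvm_mips_complete_mmio_load() writes the
 * value into gprs[io_gpr], and pending_load_cause lets update_pc()
 * account for a branch delay slot at completion time.
 */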
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 0;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 0;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
vcpu->mmio_needed = 2;
vcpu->mmio_needed = 1;
if (bytes > sizeof(run->mmio.data)) {
kvm_err("%s: bad MMIO length: %d\n", __func__,
run->mmio.phys_addr =
kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
run->mmio.len = bytes;
run->mmio.is_write = 0;
vcpu->mmio_is_write = 0;
vcpu->mmio_needed = 2;
vcpu->mmio_needed = 1;
kvm_err("Load not yet supported");
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
unsigned long offset = (va & ~PAGE_MASK);
struct kvm *kvm = vcpu->kvm;
gfn = va >> PAGE_SHIFT;
if (gfn >= kvm->arch.guest_pmap_npages) {
kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
pfn = kvm->arch.guest_pmap[gfn];
pa = (pfn << PAGE_SHIFT) | offset;
kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
local_flush_icache_range(CKSEG0ADDR(pa), 32);
#define MIPS_CACHE_OP_INDEX_INV 0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
#define MIPS_CACHE_OP_IMP 0x3
#define MIPS_CACHE_OP_HIT_INV 0x4
#define MIPS_CACHE_OP_FILL_WB_INV 0x5
#define MIPS_CACHE_OP_HIT_HB 0x6
#define MIPS_CACHE_OP_FETCH_LOCK 0x7
#define MIPS_CACHE_ICACHE 0x0
#define MIPS_CACHE_DCACHE 0x1
#define MIPS_CACHE_SEC 0x3
enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
int32_t offset, cache, op_inst, op, base;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long curr_pc;
 * Update PC and hold onto current PC in case there is
 * an error and we want to roll back the PC.
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
base = (inst >> 21) & 0x1f;
op_inst = (inst >> 16) & 0x1f;
offset = (int16_t)inst;
cache = (inst >> 16) & 0x3;
op = (inst >> 18) & 0x7;
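/*
 * Editorial note: the 5-bit op_inst field (bits 20:16) decomposes into
 * op (bits 20:18) and cache (bits 17:16). For example, an op_inst of
 * 0x15 (Hit Writeback Invalidate D, an assumed encoding) yields
 * op = MIPS_CACHE_OP_FILL_WB_INV (0x5) and cache = MIPS_CACHE_DCACHE (0x1).
 */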
va = arch->gprs[base] + offset;
kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
 * invalidate the caches entirely by stepping through all the
 * ways/indexes.
if (op == MIPS_CACHE_OP_INDEX_INV) {
kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
arch->gprs[base], offset);
if (cache == MIPS_CACHE_DCACHE)
else if (cache == MIPS_CACHE_ICACHE)
kvm_err("%s: unsupported CACHE INDEX operation\n",
return EMULATE_FAIL;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_cache_index(inst, opc, vcpu);
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
__func__, va, vcpu, read_c0_entryhi());
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
/* If an entry already exists then skip */
if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
 * If the address is not in the guest TLB, then give the guest a fault;
 * the resulting handler will do the right thing.
index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
(kvm_read_c0_guest_entryhi
(cop0) & ASID_MASK));
vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
goto dont_update_pc;
struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
 * Check if the entry is valid; if not, then set up a TLB
 * invalid exception to the guest.
if (!TLB_IS_VALID(*tlb, va)) {
er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
goto dont_update_pc;
 * We fault an entry from the guest tlb to the
 * shadow host TLB.
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,
kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
goto dont_update_pc;
/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
if (cache == MIPS_CACHE_DCACHE
&& (op == MIPS_CACHE_OP_FILL_WB_INV
|| op == MIPS_CACHE_OP_HIT_INV)) {
flush_dcache_line(va);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
 * Replace the CACHE instruction with a SYNCI; not the same,
 * but it avoids a trap.
kvm_mips_trans_cache_va(inst, opc, vcpu);
} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
flush_dcache_line(va);
flush_icache_line(va);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
/* Replace the CACHE instruction with a SYNCI */
kvm_mips_trans_cache_va(inst, opc, vcpu);
kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
goto dont_update_pc;
vcpu->arch.pc = curr_pc;
enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DONE;
/* Fetch the instruction. */
if (cause & CAUSEF_BD)
inst = kvm_get_inst(opc, vcpu);
switch (((union mips_instruction)inst).r_format.opcode) {
er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
er = kvm_mips_emulate_store(inst, cause, run, vcpu);
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
++vcpu->stat.cache_exits;
trace_kvm_exit(vcpu, CACHE_EXITS);
er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
kvm_arch_vcpu_dump_regs(vcpu);
enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_SYSCALL << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
/* set pc to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x0;
kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
(T_TLB_LD_MISS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
/* XXXKYMA: is the context register used by linux??? */
kvm_write_c0_guest_entryhi(cop0, entryhi);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
return EMULATE_DONE;
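/*
 * Editorial note: the remaining kvm_mips_emulate_*() delivery helpers
 * below follow the same pattern as kvm_mips_emulate_tlbmiss_ld() above:
 * save the PC to guest CP0_EPC, set Status.EXL, mirror the branch-delay
 * state into Cause.BD, write the ExcCode field, and redirect the PC to
 * the guest exception vector (0x0 for a TLB refill with EXL clear,
 * 0x180 otherwise).
 */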
enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi =
(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
/* set pc to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
(T_TLB_LD_MISS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
/* XXXKYMA: is the context register used by linux??? */
kvm_write_c0_guest_entryhi(cop0, entryhi);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x0;
kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
(T_TLB_ST_MISS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
/* XXXKYMA: is the context register used by linux??? */
kvm_write_c0_guest_entryhi(cop0, entryhi);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
(T_TLB_ST_MISS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
/* XXXKYMA: is the context register used by linux??? */
kvm_write_c0_guest_entryhi(cop0, entryhi);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
return EMULATE_DONE;
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DONE;
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
/* If address not in the guest TLB, then we are in trouble */
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
/* XXXKYMA Invalidate and retry */
kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
kvm_mips_dump_guest_tlbs(vcpu);
kvm_mips_dump_host_tlbs();
return EMULATE_FAIL;
er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
struct kvm_vcpu_arch *arch = &vcpu->arch;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
/* XXXKYMA: is the context register used by linux??? */
kvm_write_c0_guest_entryhi(cop0, entryhi);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
(T_COP_UNUSABLE << CAUSEB_EXCCODE));
kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_RES_INST << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver RI when EXL is already set\n");
enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_BREAK << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver BP when EXL is already set\n");
enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_TRAP << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver TRAP when EXL is already set\n");
enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_MSAFPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_FPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver FPE when EXL is already set\n");
enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
(T_MSADIS << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_err("Trying to deliver MSADIS when EXL is already set\n");
/* ll/sc, rdhwr, sync emulation */
#define OPCODE 0xfc000000
#define BASE 0x03e00000
#define RT 0x001f0000
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
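/*
 * Illustrative decode (well-known TLS access sequence): "rdhwr v1, $29"
 * encodes as 0x7c03e83b, which satisfies (inst & OPCODE) == SPEC3 and
 * (inst & FUNC) == RDHWR, with rt = 3 (v1) and rd = 29 (UserLocal),
 * exactly the case kvm_mips_handle_ri() below emulates.
 */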
enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
 * Update PC and hold onto current PC in case there is
 * an error and we want to roll back the PC.
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
/* Fetch the instruction. */
if (cause & CAUSEF_BD)
inst = kvm_get_inst(opc, vcpu);
if (inst == KVM_INVALID_INST) {
kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
return EMULATE_FAIL;
if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
int rd = (inst & RD) >> 11;
int rt = (inst & RT) >> 16;
/* If usermode, check RDHWR rd is allowed by guest HWREna */
if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
case 0: /* CPU number */
case 1: /* SYNCI length */
arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
case 2: /* Read count register */
arch->gprs[rt] = kvm_mips_read_count(vcpu);
case 3: /* Count register resolution */
switch (current_cpu_data.cputype) {
arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
return EMULATE_DONE;
 * Roll back the PC (if in a branch delay slot the PC already points to
 * the branch target) and pass the RI exception on to the guest OS.
vcpu->arch.pc = curr_pc;
return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
enum emulation_result er = EMULATE_DONE;
if (run->mmio.len > sizeof(*gpr)) {
kvm_err("Bad MMIO length: %d", run->mmio.len);
er = update_pc(vcpu, vcpu->arch.pending_load_cause);
if (er == EMULATE_FAIL)
switch (run->mmio.len) {
*gpr = *(int32_t *) run->mmio.data;
if (vcpu->mmio_needed == 2)
*gpr = *(int16_t *) run->mmio.data;
*gpr = *(uint16_t *)run->mmio.data;
if (vcpu->mmio_needed == 2)
*gpr = *(int8_t *) run->mmio.data;
*gpr = *(u8 *) run->mmio.data;
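/*
 * Note (editorial): mmio_needed == 2 is how kvm_mips_emulate_load()
 * marks sign-extending loads (lh/lb), so the int16_t/int8_t casts above
 * sign-extend into the GPR, while the unsigned variants (lhu/lbu,
 * mmio_needed == 1) take the zero-extending uint16_t/u8 paths.
 */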
if (vcpu->arch.pending_load_cause & CAUSEF_BD)
kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
kvm_write_c0_guest_epc(cop0, arch->pc);
kvm_set_c0_guest_status(cop0, ST0_EXL);
if (cause & CAUSEF_BD)
kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
kvm_change_c0_guest_cause(cop0, (0xff),
(exccode << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
exccode, kvm_read_c0_guest_epc(cop0),
kvm_read_c0_guest_badvaddr(cop0));
kvm_err("Trying to deliver EXC when EXL is already set\n");
enum emulation_result kvm_mips_check_privilege(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DONE;
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
case T_COP_UNUSABLE:
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
er = EMULATE_PRIV_FAIL;
 * If we are accessing Guest kernel space, then send an
 * address error exception to the guest.
if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
kvm_debug("%s: LD MISS @ %#lx\n", __func__,
cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
 * If we are accessing Guest kernel space, then send an
 * address error exception to the guest.
if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
kvm_debug("%s: ST MISS @ %#lx\n", __func__,
cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
kvm_debug("%s: address error ST @ %#lx\n", __func__,
if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
kvm_debug("%s: address error LD @ %#lx\n", __func__,
if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
er = EMULATE_PRIV_FAIL;
if (er == EMULATE_PRIV_FAIL)
kvm_mips_emulate_exc(cause, opc, run, vcpu);
 * User Address (UA) fault; this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 * case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 * case we inject the TLB from the Guest TLB into the shadow host TLB.
enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
enum emulation_result er = EMULATE_DONE;
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long va = vcpu->arch.host_cp0_badvaddr;
kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
 * KVM would not have got the exception if this entry was valid in the
 * shadow host TLB. Check the Guest TLB; if the entry is not there then
 * send the guest an exception. The guest exc handler should then inject
 * an entry into the guest TLB.
index = kvm_mips_guest_tlb_lookup(vcpu,
(kvm_read_c0_guest_entryhi
(vcpu->arch.cop0) & ASID_MASK));
if (exccode == T_TLB_LD_MISS) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
} else if (exccode == T_TLB_ST_MISS) {
er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
kvm_err("%s: invalid exc code: %d\n", __func__,
struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
 * Check if the entry is valid; if not, then set up a TLB invalid
 * exception to the guest.
if (!TLB_IS_VALID(*tlb, va)) {
if (exccode == T_TLB_LD_MISS) {
er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
} else if (exccode == T_TLB_ST_MISS) {
er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
kvm_err("%s: invalid exc code: %d\n", __func__,
kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
 * OK we have a Guest TLB entry, now inject it into the
 * shadow host TLB.
if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
__func__, va, index, vcpu,