/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
/*
 * Compute the return address and emulate the branch, if required.
 * This function should be called only when a branch delay slot is active.
 */
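/*
 * For illustration: a taken conditional branch below computes its target as
 * epc + 4 + (simmediate << 2). An encoded offset of -1 therefore yields
 * epc + 4 - 4 = epc (a branch to itself), while an offset of 0 targets the
 * instruction immediately following the delay slot.
 */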
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long nextpc = KVM_INVALID_INST;

	/* Read the instruction */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
	/* jr and jalr are in r_format format. */
		switch (insn.r_format.func) {
			arch->gprs[insn.r_format.rd] = epc + 8;
			nextpc = arch->gprs[insn.r_format.rs];
	/*
	 * This group contains:
	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
	 */
		switch (insn.i_format.rt) {
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);

			dspcontrol = rddsp(0x01);
			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
	/* These are unconditional and in j_format. */
		arch->gprs[31] = instpc + 8;
		epc |= (insn.j_format.target << 2);

	/* These are conditional and in i_format. */
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);

		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);

	case blez_op:	/* not really i_format */
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);

		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);

	/* And now the FPA/cp1 branch instructions. */
		kvm_err("%s: unsupported cop1_op\n", __func__);

	kvm_err("%s: unaligned epc\n", __func__);

	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow
	 * since at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
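	/*
	 * For illustration, with the default count_hz of 100 MHz:
	 *   count_period = 10^9 * 2^32 / 10^8 = 42949672960 ns (~42.9 s),
	 * so delta * count_hz < 10^9 * 2^32 ~= 4.3 * 10^18, which fits
	 * comfortably in a u64 (max ~1.8 * 10^19).
	 */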
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit is set in count_ctl, in
 * which case it is count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and injects the
 * timer interrupt if it is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	uint32_t count, compare;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((int32_t)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* Calculate timeout (wrap 0 to 2^32) */
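	/*
	 * For example, when compare == count the expression below yields
	 * (uint32_t)0xffffffff + 1 = 2^32 ticks (a full period), while
	 * compare == count + 1 yields a delta of a single tick.
	 */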
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		kvm_mips_resume_hrtimer(vcpu, now, count);
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;
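	/*
	 * For illustration: at 100 MHz, count_period =
	 * (10^9 << 32) / 10^8 = 42949672960 ns, i.e. the 32-bit CP0_Count
	 * wraps roughly every 42.9 seconds.
	 */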
	kvm_mips_write_count(vcpu, 0);
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so
 * that CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise
 * ensure any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	uint32_t count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(uint32_t)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so
 * that we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t pc = vcpu->arch.pc;

	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:	VCPU with changed mappings.
 * @tlb:	TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that
 * we can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
					  struct kvm_mips_tlb *tlb)
	/* No need to flush for entries which are already invalid */
	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & ENTRYLO_V))
		return;
	/* User address space doesn't need flushing for KSeg2/3 changes */
	user = tlb->tlb_hi < KVM_GUEST_KSEG0;

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	/* Invalidate the whole ASID on other CPUs */
	cpu = smp_processor_id();
	for_each_possible_cpu(i) {
			vcpu->arch.guest_user_asid[i] = 0;
			vcpu->arch.guest_kernel_asid[i] = 0;
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;

	tlb = &vcpu->arch.guest_tlb[index];

	kvm_mips_invalidate_guest_tlb(vcpu, tlb);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
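	/*
	 * Pick a pseudo-random victim index; note that the mask below relies
	 * on KVM_MIPS_GUEST_TLB_SIZE being a power of two.
	 */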
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	kvm_mips_invalidate_guest_tlb(vcpu, tlb);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	uint32_t pc = vcpu->arch.pc;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;
/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
	/* Config4 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;
/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
	/* Config5 is optional */
/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5.
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
					   uint32_t cause, struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;
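	/*
	 * For reference, the coprocessor-0 instruction field layout is:
	 * bits 31:26 opcode (cop0_op), 25:21 copz (MFC0/MTC0/... or the CO
	 * bit plus function), 20:16 rt, 15:11 rd, and 2:0 the sel field.
	 */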
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;

			kvm_err("!!!COP0_RFE!!!\n");

			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;

			er = kvm_mips_emul_wait(vcpu);
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);

			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld\n",
					vcpu->arch.gprs[rt]);
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
					vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						  kvm_read_c0_guest_entryhi(cop0)

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
					cpu = smp_processor_id();
					for_each_possible_cpu(i)
						vcpu->arch.guest_user_asid[i] = 0;
						vcpu->arch.guest_kernel_asid[i] = 0;

				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_status(ST0_CU1, val);

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);

			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
				kvm_debug("[%#lx] mfmcz_op: EI\n",
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
				kvm_clear_c0_guest_status(cop0, ST0_IE);

				uint32_t css =
					cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */

				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1337 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1338 vcpu->arch.pc, copz);
1345 /* Rollback PC only if emulation was unsuccessful */
1346 if (er == EMULATE_FAIL)
1347 vcpu->arch.pc = curr_pc;
1351 * This is for special instructions whose emulation
1352 * updates the PC, so do not overwrite the PC under
enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;
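	/*
	 * The fields above follow the standard MIPS I-type layout:
	 * bits 31:26 opcode, 25:21 base (rs), 20:16 rt, 15:0 signed offset.
	 */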
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint16_t *) data);

		kvm_err("Store not yet supported\n");
	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;
enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		kvm_err("Load not yet supported\n");
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,

	local_flush_icache_range(CKSEG0ADDR(pa), 32);
#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3
enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
					     uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = (int16_t)inst;
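	/*
	 * Note that the rt field of CACHE (op_inst above) is split: its low
	 * two bits select the target cache and the next three bits encode
	 * the operation.
	 */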
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);
	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
				__func__, va, vcpu, read_c0_entryhi());
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)

		/*
		 * If the address is not in the guest TLB, give the guest a
		 * fault; the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
				goto dont_update_pc;
			}

			/*
			 * We fault an entry from the guest tlb to the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,
1733 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1734 cache, op, base, arch->gprs[base], offset);
1737 goto dont_update_pc;
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);

		goto dont_update_pc;
	vcpu->arch.pc = curr_pc;
enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);

		er = kvm_mips_emulate_store(inst, cause, run, vcpu);

		er = kvm_mips_emulate_load(inst, cause, run, vcpu);

		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
		kvm_arch_vcpu_dump_regs(vcpu);
enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
	enum emulation_result er = EMULATE_DONE;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	/* If address not in the guest TLB, then we are in trouble */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return EMULATE_DONE;
enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_RES_INST << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_BREAK << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver BP when EXL is already set\n");
enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
						uint32_t *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_TRAP << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver TRAP when EXL is already set\n");
enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_MSAFPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
/* ll/sc, rdhwr, sync emulation */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
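/*
 * kvm_mips_handle_ri() - Emulate a Reserved Instruction exception.
 *
 * Guest usermode RDHWR normally traps here as an RI exception (the host
 * HWREna does not expose the hardware registers to the guest), so the
 * common hardware registers are emulated: CPU number, SYNCI step size,
 * Count, Count resolution and UserLocal. Anything else is passed back to
 * the guest as an RI exception.
 */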
enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			arch->gprs[rt] = kvm_mips_read_count(vcpu);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:	/* UserLocal (ULR) */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;
		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
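/*
 * kvm_mips_complete_mmio_load() - Finish an MMIO load once userspace has
 * filled in run->mmio.data: extend the value into the saved destination
 * GPR (vcpu->mmio_needed == 2 requests sign extension) and advance the
 * PC past the load that originally faulted.
 */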
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			*gpr = *(uint16_t *)run->mmio.data;
		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}
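/*
 * Generic exception delivery used by kvm_mips_check_privilege(): inject
 * whatever ExcCode is in @cause into the guest, also propagating the host
 * CP0_BadVAddr, since privilege failures are rewritten into address
 * errors or TLB misses for the guest to handle.
 */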
static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_check_privilege(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case T_INT:
		case T_SYSCALL:
		case T_BREAK:
		case T_RES_INST:
		case T_TRAP:
		case T_MSAFPE:
		case T_FPE:
		case T_MSADIS:
			break;

		case T_COP_UNUSABLE:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_MOD:
			break;

		case T_TLB_LD_MISS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			} else {
				er = EMULATE_PRIV_FAIL;
			}
			break;
		case T_ADDR_ERR_LD:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			} else {
				er = EMULATE_PRIV_FAIL;
			}
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}
/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
			(va & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
			 ASID_MASK));
	if (index < 0) {
		if (exccode == T_TLB_LD_MISS) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == T_TLB_ST_MISS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == T_TLB_ST_MISS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								 NULL, NULL)) {
				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, va, index, vcpu,