arch/mips/kvm/kvm_mips_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#include "trace.h"

/*
 * Compute the return address and emulate the branch, if required.
 * This function should be called only when the faulting instruction
 * sits in a branch delay slot (i.e. Cause.BD is set).
 */
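/*
 * For reference, the delay-slot arithmetic used throughout this function:
 * a branch at address epc has its delay slot at epc + 4.  A taken
 * conditional branch targets epc + 4 + (simm16 << 2); a not-taken branch
 * continues at epc + 8, skipping the already-executed delay slot.  E.g. a
 * beq at 0x80001000 whose 16-bit immediate is 16 resolves to 0x80001044
 * if taken (0x80001000 + 4 + (16 << 2)) and to 0x80001008 otherwise.
 */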
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/*
		 * jr and jalr are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/*
		 * These are unconditional and in j_format.
		 */
	case jal_op:
		arch->gprs[31] = instpc + 8;
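		/* Fall through - jal differs from j only in setting $ra */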
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/*
		 * And now the FPA/cp1 branch instructions.
		 */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else {
		vcpu->arch.pc += 4;
	}

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/*
 * Every time the COMPARE register is written, we need to decide when to
 * fire the timer that represents timer ticks to the guest.
 */
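/*
 * Note: rather than deriving the next tick from COMPARE - COUNT, this
 * implementation simply re-arms a fixed 10ms host hrtimer (see
 * MS_TO_NS(10) below) whenever counting is enabled; the guest's view of
 * COUNT itself is synthesized from the host counter in the mfc_op
 * handler, at 1/4 of the host rate.
 */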
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	/* If COUNT is enabled */
	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
		hrtimer_start(&vcpu->arch.comparecount_timer,
			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
	}

	return er;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so
 * that we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk
		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		     pc, index, kvm_read_c0_guest_entryhi(cop0),
		     kvm_read_c0_guest_entrylo0(cop0),
		     kvm_read_c0_guest_entrylo1(cop0),
		     kvm_read_c0_guest_pagemask(cop0));
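		/*
		 * Bit 31 of the architectural Index register is the TLBP
		 * probe-failure (P) flag; mask it off and wrap whatever is
		 * left into the guest TLB's range before using it below.
		 */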
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
#if 1
	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
	     kvm_read_c0_guest_pagemask(cop0));

	return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];

#if 1
	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0),
	     kvm_read_c0_guest_entrylo1(cop0));

	return er;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return er;
}

enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;
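	/*
	 * For reference, the MIPS32 COP0 instruction layout being decoded
	 * here: bits 31:26 hold the COP0 major opcode, bit 25 (CO) selects
	 * coprocessor-control operations such as TLBWI/ERET (with the
	 * function code in the low bits), and when CO is clear, bits 25:21
	 * (copz) give the move variant (MFC0/MTC0/...), bits 20:16 the GPR
	 * rt, bits 15:11 the CP0 register rd, and bits 2:0 the select field.
	 */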

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/*  Read indexed TLB entry  */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/*  Write indexed  */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/*  Write random  */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/*
				 * XXXKYMA: Run the Guest count register @ 1/4
				 * the rate of the host
				 */
				vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);

			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld\n",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/*
				 * Are we writing to COUNT?  Linux doesn't seem
				 * to write into COUNT; we throw an error if we
				 * notice a write to COUNT
				 */
				/* er = EMULATE_FAIL; */
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_write_c0_guest_compare(cop0,
							   vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);
				/*
				 * Make sure that CU1 and NMI bits are
				 * never set
				 */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			printk
			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			     vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets,
				 * so SRSCtl[PSS] == SRSCtl[CSS] == 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			printk
			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			     vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}

enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;
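	/*
	 * Note that although base/offset are decoded from the I-type
	 * encoding above, the effective address used below comes from
	 * host_cp0_badvaddr, which the hardware latched when the store
	 * faulted, so there is no need to recompute gprs[base] + offset.
	 */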

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint8_t *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint16_t *) data);
		break;

	default:
		printk("Store not yet supported\n");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

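	/*
	 * In the cases below, mmio_needed doubles as a sign-extension flag
	 * for when userspace completes the read: 2 marks the signed
	 * variants (lb, lh) so the MMIO completion path (not shown in this
	 * excerpt) can sign-extend the data into io_gpr, while 1 marks the
	 * unsigned ones.
	 */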
	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__, bytes);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		printk("Load not yet supported\n");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	mips32_SyncICache(CKSEG0ADDR(pa), 32);
	return 0;
}

#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3

enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;
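	/*
	 * For reference: in the CACHE encoding, the 5-bit rt field
	 * (bits 20:16, op_inst above) is split into the operation in
	 * bits 20:18 ("op") and the target cache in bits 17:16 ("cache",
	 * 0 = primary I-cache, 1 = primary D-cache), matching the
	 * MIPS_CACHE_* definitions above.
	 */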

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * INDEX_INV is issued by Linux on startup to invalidate the caches
	 * entirely by stepping through all the ways/indexes, so handle it
	 * as a whole-cache operation
	 */
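	/*
	 * (Index-type ops name a specific cache line by way/index, which
	 * does not translate meaningfully across the guest/host boundary;
	 * presumably that is why the code below conservatively blasts the
	 * entire affected cache instead.)
	 */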
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug
		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		     arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE) {
			r4k_blast_dcache();
		} else if (cache == MIPS_CACHE_ICACHE) {
			r4k_blast_icache();
		} else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If the address is not in the guest TLB, then give the guest
		 * a fault, the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault the entry from the guest TLB into
				 * the shadow host TLB
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		printk
		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk
		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;
done:
	return er;
}

enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;

	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

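/*
 * The exception vectors used by the delivery helpers below follow the
 * MIPS convention: KVM_GUEST_KSEG0 + 0x0 is the TLB refill vector, taken
 * only when a TLB miss occurs with Status.EXL clear, while
 * KVM_GUEST_KSEG0 + 0x180 is the general exception vector used for
 * everything else (including nested faults with EXL already set).
 */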
enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	int index;

	/*
	 * If address not in the guest TLB, then we are in trouble
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

1391 enum emulation_result
1392 kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1393                          struct kvm_run *run, struct kvm_vcpu *vcpu)
1394 {
1395         struct mips_coproc *cop0 = vcpu->arch.cop0;
1396         struct kvm_vcpu_arch *arch = &vcpu->arch;
1397         enum emulation_result er = EMULATE_DONE;
1398
1399         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1400                 /* save old pc */
1401                 kvm_write_c0_guest_epc(cop0, arch->pc);
1402                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1403
1404                 if (cause & CAUSEF_BD)
1405                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1406                 else
1407                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1408
1409         }
1410
1411         arch->pc = KVM_GUEST_KSEG0 + 0x180;
1412
1413         kvm_change_c0_guest_cause(cop0, (0xff),
1414                                   (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1415         kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1416
1417         return er;
1418 }
1419
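/*
 * Sketch (hypothetical helper, not used above): recovering the coprocessor
 * number from the Cause CE field, which kvm_mips_emulate_fpu_exc() sets to 1
 * for the FPU (COP1).
 */
static inline unsigned int kvm_mips_cause_ce_sketch(unsigned long cause)
{
	return (cause & CAUSEF_CE) >> CAUSEB_CE;
}
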
1420 enum emulation_result
1421 kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1422                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1423 {
1424         struct mips_coproc *cop0 = vcpu->arch.cop0;
1425         struct kvm_vcpu_arch *arch = &vcpu->arch;
1426         enum emulation_result er = EMULATE_DONE;
1427
1428         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1429                 /* save old pc */
1430                 kvm_write_c0_guest_epc(cop0, arch->pc);
1431                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1432
1433                 if (cause & CAUSEF_BD)
1434                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1435                 else
1436                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1437
1438                 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1439
1440                 kvm_change_c0_guest_cause(cop0, (0xff),
1441                                           (T_RES_INST << CAUSEB_EXCCODE));
1442
1443                 /* Set PC to the exception entry point */
1444                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1445
1446         } else {
1447                 kvm_err("Trying to deliver RI when EXL is already set\n");
1448                 er = EMULATE_FAIL;
1449         }
1450
1451         return er;
1452 }
1453
1454 enum emulation_result
1455 kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1456                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1457 {
1458         struct mips_coproc *cop0 = vcpu->arch.cop0;
1459         struct kvm_vcpu_arch *arch = &vcpu->arch;
1460         enum emulation_result er = EMULATE_DONE;
1461
1462         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1463                 /* save old pc */
1464                 kvm_write_c0_guest_epc(cop0, arch->pc);
1465                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1466
1467                 if (cause & CAUSEF_BD)
1468                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1469                 else
1470                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1471
1472                 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1473
1474                 kvm_change_c0_guest_cause(cop0, (0xff),
1475                                           (T_BREAK << CAUSEB_EXCCODE));
1476
1477                 /* Set PC to the exception entry point */
1478                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1479
1480         } else {
1481                 kvm_err("Trying to deliver BP when EXL is already set\n");
1482                 er = EMULATE_FAIL;
1483         }
1484
1485         return er;
1486 }
1487
1488 /*
1489  * ll/sc, rdhwr, sync emulation
1490  */
1491
1492 #define OPCODE 0xfc000000
1493 #define BASE   0x03e00000
1494 #define RT     0x001f0000
1495 #define OFFSET 0x0000ffff
1496 #define LL     0xc0000000
1497 #define SC     0xe0000000
1498 #define SPEC0  0x00000000
1499 #define SPEC3  0x7c000000
1500 #define RD     0x0000f800
1501 #define FUNC   0x0000003f
1502 #define SYNC   0x0000000f
1503 #define RDHWR  0x0000003b
1504
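/*
 * Sketch (hypothetical helper, assuming the standard MIPS32 field layout
 * encoded by the masks above): recognising an RDHWR rt, rd instruction and
 * extracting its register fields, as kvm_mips_handle_ri() below does inline.
 */
static inline int kvm_mips_decode_rdhwr_sketch(uint32_t inst, int *rt, int *rd)
{
	if ((inst & OPCODE) != SPEC3 || (inst & FUNC) != RDHWR)
		return 0;
	*rt = (inst & RT) >> 16;	/* destination GPR */
	*rd = (inst & RD) >> 11;	/* hardware register selector */
	return 1;
}
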
1505 enum emulation_result
1506 kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1507                    struct kvm_run *run, struct kvm_vcpu *vcpu)
1508 {
1509         struct mips_coproc *cop0 = vcpu->arch.cop0;
1510         struct kvm_vcpu_arch *arch = &vcpu->arch;
1511         enum emulation_result er = EMULATE_DONE;
1512         unsigned long curr_pc;
1513         uint32_t inst;
1514
1515         /*
1516          * Update PC and hold onto current PC in case there is
1517          * an error and we want to roll back the PC
1518          */
1519         curr_pc = vcpu->arch.pc;
1520         er = update_pc(vcpu, cause);
1521         if (er == EMULATE_FAIL)
1522                 return er;
1523
1524         /*
1525          *  Fetch the instruction.
1526          */
1527         if (cause & CAUSEF_BD)
1528                 opc += 1;
1529
1530         inst = kvm_get_inst(opc, vcpu);
1531
1532         if (inst == KVM_INVALID_INST) {
1533                 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
1534                 return EMULATE_FAIL;
1535         }
1536
1537         if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1538                 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1539                 int rd = (inst & RD) >> 11;
1540                 int rt = (inst & RT) >> 16;
1541                 /* If usermode, check RDHWR rd is allowed by guest HWREna */
1542                 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
1543                         kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
1544                                   rd, opc);
1545                         goto emulate_ri;
1546                 }
1547                 switch (rd) {
1548                 case 0: /* CPU number */
1549                         arch->gprs[rt] = 0;
1550                         break;
1551                 case 1: /* SYNCI length */
1552                         arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1553                                              current_cpu_data.icache.linesz);
1554                         break;
1555                 case 2: /* Read count register */
1556                         kvm_debug("RDHWR: Count register\n");
1557                         arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1558                         break;
1559                 case 3: /* Count register resolution */
1560                         switch (current_cpu_data.cputype) {
1561                         case CPU_20KC:
1562                         case CPU_25KF:
1563                                 arch->gprs[rt] = 1;
1564                                 break;
1565                         default:
1566                                 arch->gprs[rt] = 2;
1567                         }
1568                         break;
1569                 case 29:
1570                         arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1571                         break;
1572
1573                 default:
1574                         kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
1575                         goto emulate_ri;
1576                 }
1577         } else {
1578                 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
1579                 goto emulate_ri;
1580         }
1581
1582         return EMULATE_DONE;
1583
1584 emulate_ri:
1585         /*
1586          * Rollback PC (if in branch delay slot then the PC already points to
1587          * branch target), and pass the RI exception to the guest OS.
1588          */
1589         vcpu->arch.pc = curr_pc;
1590         return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1591 }
1592
1593 enum emulation_result
1594 kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1595 {
1596         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1597         enum emulation_result er = EMULATE_DONE;
1598         unsigned long curr_pc;
1599
1600         if (run->mmio.len > sizeof(*gpr)) {
1601                 kvm_err("Bad MMIO length: %d\n", run->mmio.len);
1602                 er = EMULATE_FAIL;
1603                 goto done;
1604         }
1605
1606         /*
1607          * Update PC and hold onto current PC in case there is
1608          * an error and we want to roll back the PC
1609          */
1610         curr_pc = vcpu->arch.pc;
1611         er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1612         if (er == EMULATE_FAIL)
1613                 return er;
1614
1615         switch (run->mmio.len) {
1616         case 4:
1617                 *gpr = *(int32_t *) run->mmio.data;
1618                 break;
1619
1620         case 2:
1621                 if (vcpu->mmio_needed == 2)
1622                         *gpr = *(int16_t *) run->mmio.data;
1623                 else
1624                         *gpr = *(uint16_t *) run->mmio.data;
1625
1626                 break;
1627         case 1:
1628                 if (vcpu->mmio_needed == 2)
1629                         *gpr = *(int8_t *) run->mmio.data;
1630                 else
1631                         *gpr = *(u8 *) run->mmio.data;
1632                 break;
1633         }
1634
1635         if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1636                 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1637                           vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1638                           vcpu->mmio_needed);
1640
1641 done:
1642         return er;
1643 }
1644
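/*
 * Sketch (hypothetical helper, assuming mmio_needed == 2 marks a signed load
 * as in kvm_mips_complete_mmio_load() above): narrow MMIO data is
 * sign-extended into the GPR for LB/LH and zero-extended for LBU/LHU.
 */
static inline unsigned long kvm_mips_mmio_extend16_sketch(const void *data,
							  int mmio_needed)
{
	if (mmio_needed == 2)
		return (unsigned long)*(const int16_t *)data;	/* LH */
	return (unsigned long)*(const uint16_t *)data;		/* LHU */
}
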
1645 static enum emulation_result
1646 kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1647                      struct kvm_run *run, struct kvm_vcpu *vcpu)
1648 {
1649         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1650         struct mips_coproc *cop0 = vcpu->arch.cop0;
1651         struct kvm_vcpu_arch *arch = &vcpu->arch;
1652         enum emulation_result er = EMULATE_DONE;
1653
1654         if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1655                 /* save old pc */
1656                 kvm_write_c0_guest_epc(cop0, arch->pc);
1657                 kvm_set_c0_guest_status(cop0, ST0_EXL);
1658
1659                 if (cause & CAUSEF_BD)
1660                         kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1661                 else
1662                         kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1663
1664                 kvm_change_c0_guest_cause(cop0, (0xff),
1665                                           (exccode << CAUSEB_EXCCODE));
1666
1667                 /* Set PC to the exception entry point */
1668                 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1669                 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1670
1671                 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1672                           exccode, kvm_read_c0_guest_epc(cop0),
1673                           kvm_read_c0_guest_badvaddr(cop0));
1674         } else {
1675                 kvm_err("Trying to deliver EXC when EXL is already set\n");
1676                 er = EMULATE_FAIL;
1677         }
1678
1679         return er;
1680 }
1681
1682 enum emulation_result
1683 kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1684                          struct kvm_run *run, struct kvm_vcpu *vcpu)
1685 {
1686         enum emulation_result er = EMULATE_DONE;
1687         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1688         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1689
1690         int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1691
1692         if (usermode) {
1693                 switch (exccode) {
1694                 case T_INT:
1695                 case T_SYSCALL:
1696                 case T_BREAK:
1697                 case T_RES_INST:
1698                         break;
1699
1700                 case T_COP_UNUSABLE:
1701                         if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1702                                 er = EMULATE_PRIV_FAIL;
1703                         break;
1704
1705                 case T_TLB_MOD:
1706                         break;
1707
1708                 case T_TLB_LD_MISS:
1709                         /* If we are accessing Guest kernel space, then send an address error exception to the guest */
1710                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1711                                 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
1712                                           badvaddr);
1713                                 cause &= ~0xff;
1714                                 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1715                                 er = EMULATE_PRIV_FAIL;
1716                         }
1717                         break;
1718
1719                 case T_TLB_ST_MISS:
1720                         /* If we are accessing Guest kernel space, then send an address error exception to the guest */
1721                         if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1722                                 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
1723                                           badvaddr);
1724                                 cause &= ~0xff;
1725                                 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1726                                 er = EMULATE_PRIV_FAIL;
1727                         }
1728                         break;
1729
1730                 case T_ADDR_ERR_ST:
1731                         kvm_debug("%s: address error ST @ %#lx\n", __func__,
1732                                   badvaddr);
1733                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1734                                 cause &= ~0xff;
1735                                 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1736                         }
1737                         er = EMULATE_PRIV_FAIL;
1738                         break;
1739                 case T_ADDR_ERR_LD:
1740                         kvm_debug("%s: address error LD @ %#lx\n", __func__,
1741                                   badvaddr);
1742                         if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1743                                 cause &= ~0xff;
1744                                 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1745                         }
1746                         er = EMULATE_PRIV_FAIL;
1747                         break;
1748                 default:
1749                         er = EMULATE_PRIV_FAIL;
1750                         break;
1751                 }
1752         }
1753
1754         if (er == EMULATE_PRIV_FAIL)
1755                 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1757         return er;
1758 }
1759
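/*
 * Sketch (hypothetical helper): the ExcCode extraction used by
 * kvm_mips_check_privilege() above and kvm_mips_handle_tlbmiss() below;
 * ExcCode occupies bits 6:2 of Cause, hence the 5-bit mask.
 */
static inline uint32_t kvm_mips_exccode_sketch(unsigned long cause)
{
	return (cause >> CAUSEB_EXCCODE) & 0x1f;
}
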
1760 /* User Address (UA) fault. This can happen if:
1761  * (1) the TLB entry is not present/valid in both the Guest and shadow host
1762  *     TLBs; we then pass the fault on to the guest kernel to handle it.
1763  * (2) the TLB entry is present in the Guest TLB but not in the shadow host
1764  *     TLB; we then inject the entry from the Guest TLB into the shadow host TLB.
1765  */
1766 enum emulation_result
1767 kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1768                         struct kvm_run *run, struct kvm_vcpu *vcpu)
1769 {
1770         enum emulation_result er = EMULATE_DONE;
1771         uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1772         unsigned long va = vcpu->arch.host_cp0_badvaddr;
1773         int index;
1774
1775         kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1776                   vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1777
1778         /* KVM would not have got the exception if this entry was valid in the
1779          * shadow host TLB. Check the Guest TLB: if the entry is not there, send
1780          * the guest an exception. The guest exception handler should then inject
1781          * an entry into the guest TLB.
1782          */
1783         index = kvm_mips_guest_tlb_lookup(vcpu,
1784                                           (va & VPN2_MASK) |
1785                                           (kvm_read_c0_guest_entryhi
1786                                            (vcpu->arch.cop0) & ASID_MASK));
1787         if (index < 0) {
1788                 if (exccode == T_TLB_LD_MISS) {
1789                         er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1790                 } else if (exccode == T_TLB_ST_MISS) {
1791                         er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1792                 } else {
1793                         kvm_err("%s: invalid exc code: %d\n", __func__, exccode);
1794                         er = EMULATE_FAIL;
1795                 }
1796         } else {
1797                 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1798
1799                 /* If the entry is not valid, set up a TLB invalid exception for the guest */
1800                 if (!TLB_IS_VALID(*tlb, va)) {
1801                         if (exccode == T_TLB_LD_MISS) {
1802                                 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1803                                                                 vcpu);
1804                         } else if (exccode == T_TLB_ST_MISS) {
1805                                 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1806                                                                 vcpu);
1807                         } else {
1808                                 kvm_err("%s: invalid exc code: %d\n", __func__,
1809                                         exccode);
1810                                 er = EMULATE_FAIL;
1811                         }
1812                 } else {
1813                         kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1814                                   tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1818                         /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1819                         kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1820                                                              NULL);
1821                 }
1822         }
1823
1824         return er;
1825 }
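
/*
 * Sketch (hypothetical helper): the guest TLB lookup key used above is the
 * faulting address's VPN2 combined with the guest's current ASID, matching
 * how EntryHi is composed when exceptions are delivered to the guest.
 */
static inline unsigned long kvm_mips_tlb_lookup_key_sketch(struct kvm_vcpu *vcpu,
							   unsigned long badvaddr)
{
	return (badvaddr & VPN2_MASK) |
	       (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK);
}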