arch/powerpc/kvm/emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

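/*
 * Extended opcode (XO field) values for primary opcode 31; these are
 * matched against get_xop(inst) in the decode switch below.
 */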
#define OP_31_XOP_TRAP      4
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_DCBST     54
#define OP_31_XOP_TRAP_64   68
#define OP_31_XOP_DCBF      86
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LD   58
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STD  62
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

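/*
 * Emulate a guest write to DEC: on Book3S a negative value raises the
 * decrementer interrupt immediately; otherwise an hrtimer is armed to
 * fire when the virtual decrementer would reach zero (BookE treats a
 * value of 0 as "decrementer disabled").
 */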
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;
        unsigned long long dec_time;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
        hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif

#ifdef CONFIG_BOOKE
        /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
        if (vcpu->arch.dec == 0)
                return;
#endif

        /*
         * The decrementer ticks at the same rate as the timebase, so
         * that's how we convert the guest DEC value to the number of
         * host ticks.
         */

        dec_time = vcpu->arch.dec;
        /*
         * Guest timebase ticks at the same frequency as host decrementer.
         * So use the host decrementer calculations for decrementer emulation.
         */
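        /*
         * decrementer_clockevent.mult/shift convert nanoseconds to
         * decrementer ticks (ticks = ns * mult >> shift), so the inverse,
         * ns = (ticks << shift) / mult, gives the timeout in nanoseconds.
         * do_div() then splits that into whole seconds (left in dec_time)
         * and a nanosecond remainder for ktime_set().
         */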
        dec_time = dec_time << decrementer_clockevent.shift;
        do_div(dec_time, decrementer_clockevent.mult);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
        vcpu->arch.dec_jiffies = get_tb();
}

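/*
 * Return the current guest-visible DEC value: the value last written,
 * minus the timebase ticks elapsed since then (dec_jiffies holds a
 * timebase snapshot despite its name).  BookE's decrementer stops at
 * zero, so the result saturates there; Book3S is allowed to go
 * negative.
 */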
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
        if (vcpu->arch.dec < jd)
                return 0;
#endif

        return vcpu->arch.dec - jd;
}

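/*
 * Generic emulation of a privileged mtspr that trapped.  SPRs kept in
 * the shared (magic) page are written directly; anything not handled
 * here is passed on to the core-specific (Book3S or BookE) emulation.
 */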
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = kvmppc_get_gpr(vcpu, rs);

        switch (sprn) {
        case SPRN_SRR0:
                vcpu->arch.shared->srr0 = spr_val;
                break;
        case SPRN_SRR1:
                vcpu->arch.shared->srr1 = spr_val;
                break;

        /* XXX We need to context-switch the timebase for
         * watchdog and FIT. */
        case SPRN_TBWL: break;
        case SPRN_TBWU: break;

        case SPRN_DEC:
                vcpu->arch.dec = spr_val;
                kvmppc_emulate_dec(vcpu);
                break;

        case SPRN_SPRG0:
                vcpu->arch.shared->sprg0 = spr_val;
                break;
        case SPRN_SPRG1:
                vcpu->arch.shared->sprg1 = spr_val;
                break;
        case SPRN_SPRG2:
                vcpu->arch.shared->sprg2 = spr_val;
                break;
        case SPRN_SPRG3:
                vcpu->arch.shared->sprg3 = spr_val;
                break;

        default:
                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
                                                     spr_val);
                if (emulated == EMULATE_FAIL)
                        printk(KERN_INFO "mtspr: unknown spr "
                                "0x%x\n", sprn);
                break;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

        return emulated;
}

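/*
 * Generic emulation of a privileged mfspr; the counterpart of the
 * mtspr handler above.  The result is only committed to the target
 * GPR when emulation succeeded.
 */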
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = 0;

        switch (sprn) {
        case SPRN_SRR0:
                spr_val = vcpu->arch.shared->srr0;
                break;
        case SPRN_SRR1:
                spr_val = vcpu->arch.shared->srr1;
                break;
        case SPRN_PVR:
                spr_val = vcpu->arch.pvr;
                break;
        case SPRN_PIR:
                spr_val = vcpu->vcpu_id;
                break;

        /* Note: mftb and TBRL/TBWL are user-accessible, so
         * the guest can always access the real TB anyways.
         * In fact, we probably will never see these traps. */
        case SPRN_TBWL:
                spr_val = get_tb() >> 32;
                break;
        case SPRN_TBWU:
                spr_val = get_tb();
                break;

        case SPRN_SPRG0:
                spr_val = vcpu->arch.shared->sprg0;
                break;
        case SPRN_SPRG1:
                spr_val = vcpu->arch.shared->sprg1;
                break;
        case SPRN_SPRG2:
                spr_val = vcpu->arch.shared->sprg2;
                break;
        case SPRN_SPRG3:
                spr_val = vcpu->arch.shared->sprg3;
                break;
        /* Note: SPRG4-7 are user-readable, so we don't get
         * a trap. */

        case SPRN_DEC:
                spr_val = kvmppc_get_dec(vcpu, get_tb());
                break;
        default:
                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
                                                     &spr_val);
                if (unlikely(emulated == EMULATE_FAIL)) {
                        printk(KERN_INFO "mfspr: unknown spr "
                                "0x%x\n", sprn);
                }
                break;
        }

        if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, rt, spr_val);
        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

        return emulated;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
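/*
 * Decode and emulate a single trapped instruction.  get_op() extracts
 * the primary opcode, get_xop() the extended opcode used by primary
 * opcode 31, and ra/rs/rt/sprn are the usual register/SPR fields
 * (helpers from asm/disassemble.h).
 */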
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        int ra = get_ra(inst);
        int rs = get_rs(inst);
        int rt = get_rt(inst);
        int sprn = get_sprn(inst);
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu,
                                          vcpu->arch.shared->esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
                case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                        kvmppc_core_queue_program(vcpu,
                                        vcpu->arch.shared->esr | ESR_PTR);
#endif
                        advance = 0;
                        break;
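                /*
                 * kvmppc_handle_load()/kvmppc_handle_store() take the
                 * access size in bytes; the final argument selects a
                 * normal big-endian access (1) or a byte-reversed one
                 * (0) for the lwbrx/stwbrx family.
                 */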
                case OP_31_XOP_LWZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

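                /*
                 * The "update" forms also write the effective address of
                 * the access back into rA; vcpu->arch.vaddr_accessed holds
                 * the effective address of the access that trapped.
                 */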
                case OP_31_XOP_LBZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_STWX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_LHAX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MFSPR:
                        emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
                        break;

                case OP_31_XOP_STHX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MTSPR:
                        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
                        break;

                case OP_31_XOP_DCBST:
                case OP_31_XOP_DCBF:
                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case OP_31_XOP_LWBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

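        /*
         * The remaining cases are D-form loads and stores, keyed purely on
         * the primary opcode; the effective address of the access is
         * already known from the fault that brought us here.
         */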
        case OP_LWZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
        case OP_LD:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                break;

        case OP_LWZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LBZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STW:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
        case OP_STD:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               8, 1);
                break;

        case OP_STWU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STB:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHA:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STH:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

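        /*
         * Anything the generic decoder did not recognise is handed to the
         * core-specific (Book3S/BookE) emulator.  EMULATE_AGAIN leaves the
         * PC untouched so the instruction can be retried; if even the core
         * emulator fails, the guest gets a program interrupt.
         */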
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}