arch/s390/kvm/priv.c (firefly-linux-kernel-4.4.55.git)
1 /*
2  * handling privileged instructions
3  *
4  * Copyright IBM Corp. 2008, 2013
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  */
13
14 #include <linux/kvm.h>
15 #include <linux/gfp.h>
16 #include <linux/errno.h>
17 #include <linux/compat.h>
18 #include <asm/asm-offsets.h>
19 #include <asm/facility.h>
20 #include <asm/current.h>
21 #include <asm/debug.h>
22 #include <asm/ebcdic.h>
23 #include <asm/sysinfo.h>
24 #include <asm/pgtable.h>
25 #include <asm/pgalloc.h>
26 #include <asm/io.h>
27 #include <asm/ptrace.h>
28 #include <asm/compat.h>
29 #include "gaccess.h"
30 #include "kvm-s390.h"
31 #include "trace.h"
32
33 static int handle_set_prefix(struct kvm_vcpu *vcpu)
34 {
35         u64 operand2;
36         u32 address = 0;
37         u8 tmp;
38
39         vcpu->stat.instruction_spx++;
40
41         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
42                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
43
44         operand2 = kvm_s390_get_base_disp_s(vcpu);
45
46         /* must be word boundary */
47         if (operand2 & 3)
48                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
49
50         /* get the value */
51         if (get_guest(vcpu, address, (u32 __user *) operand2))
52                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
53
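        /*
         * The new prefix is an 8KB-aligned address below 2GB (the mask keeps
         * bits 1-18 of the operand word).  The prefix area spans two 4KB
         * pages, which is why both pages are probed below.
         */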
54         address = address & 0x7fffe000u;
55
56         /* make sure that the new value is valid memory */
57         if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
58            (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
59                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
60
61         kvm_s390_set_prefix(vcpu, address);
62
63         VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
64         trace_kvm_s390_handle_prefix(vcpu, 1, address);
65         return 0;
66 }
67
68 static int handle_store_prefix(struct kvm_vcpu *vcpu)
69 {
70         u64 operand2;
71         u32 address;
72
73         vcpu->stat.instruction_stpx++;
74
75         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
76                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
77
78         operand2 = kvm_s390_get_base_disp_s(vcpu);
79
80         /* must be word boundary */
81         if (operand2 & 3)
82                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
83
84         address = vcpu->arch.sie_block->prefix;
85         address = address & 0x7fffe000u;
86
87         /* get the value */
88         if (put_guest(vcpu, address, (u32 __user *)operand2))
89                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
90
91         VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
92         trace_kvm_s390_handle_prefix(vcpu, 0, address);
93         return 0;
94 }
95
96 static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
97 {
98         u64 useraddr;
99
100         vcpu->stat.instruction_stap++;
101
102         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
103                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
104
105         useraddr = kvm_s390_get_base_disp_s(vcpu);
106
107         if (useraddr & 1)
108                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
109
110         if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
111                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
112
113         VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
114         trace_kvm_s390_handle_stap(vcpu, useraddr);
115         return 0;
116 }
117
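/*
 * ISKE, SSKE and RRBE intercepts all land here.  The PSW is rewound by the
 * four-byte instruction length so that the guest re-executes the storage
 * key instruction once control returns to it.
 */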
118 static int handle_skey(struct kvm_vcpu *vcpu)
119 {
120         vcpu->stat.instruction_storage_key++;
121
122         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
123                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
124
125         vcpu->arch.sie_block->gpsw.addr =
126                 __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
127         VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
128         return 0;
129 }
130
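/*
 * TEST PENDING INTERRUPTION.  With a non-zero operand address the two-word
 * interruption code is stored there; with a zero operand it is stored as a
 * three-word code in the lowcore.  The condition code sits in bits 18-19 of
 * the PSW mask, hence the shifts by 44 below: cc 1 if an I/O interruption
 * was dequeued, cc 0 otherwise.
 */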
131 static int handle_tpi(struct kvm_vcpu *vcpu)
132 {
133         struct kvm_s390_interrupt_info *inti;
134         u64 addr;
135         int cc;
136
137         addr = kvm_s390_get_base_disp_s(vcpu);
138         if (addr & 3)
139                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
140         cc = 0;
141         inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
142         if (!inti)
143                 goto no_interrupt;
144         cc = 1;
145         if (addr) {
146                 /*
147                  * Store the two-word I/O interruption code into the
148                  * provided area.
149                  */
150                 if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
151                     || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
152                     || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
153                         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
154         } else {
155                 /*
156                  * Store the three-word I/O interruption code into
157                  * the appropriate lowcore area.
158                  */
159                 put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
160                 put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
161                 put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
162                 put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
163         }
164         kfree(inti);
165 no_interrupt:
166         /* Set condition code and we're done. */
167         vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
168         vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
169         return 0;
170 }
171
172 static int handle_tsch(struct kvm_vcpu *vcpu)
173 {
174         struct kvm_s390_interrupt_info *inti;
175
176         inti = kvm_s390_get_io_int(vcpu->kvm, 0,
177                                    vcpu->run->s.regs.gprs[1]);
178
179         /*
180          * Prepare exit to userspace.
181          * We indicate whether we dequeued a pending I/O interrupt
182          * so that userspace can re-inject it if the instruction gets
183          * a program check. While this may re-order the pending I/O
184          * interrupts, this is no problem since the priority is kept
185          * intact.
186          */
187         vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
188         vcpu->run->s390_tsch.dequeued = !!inti;
189         if (inti) {
190                 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
191                 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
192                 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
193                 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
194         }
195         vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
196         kfree(inti);
197         return -EREMOTE;
198 }
199
200 static int handle_io_inst(struct kvm_vcpu *vcpu)
201 {
202         VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
203
204         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
205                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
206
207         if (vcpu->kvm->arch.css_support) {
208                 /*
209                  * Most I/O instructions will be handled by userspace.
210                  * Exceptions are tpi and the interrupt portion of tsch.
211                  */
212                 if (vcpu->arch.sie_block->ipa == 0xb236)        /* TPI */
213                         return handle_tpi(vcpu);
214                 if (vcpu->arch.sie_block->ipa == 0xb235)        /* TSCH */
215                         return handle_tsch(vcpu);
216                 /* Handle in userspace. */
217                 return -EOPNOTSUPP;
218         } else {
219                 /*
220                  * Set condition code 3 to stop the guest from issuing channel
221                  * I/O instructions.
222                  */
223                 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
224                 vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
225                 return 0;
226         }
227 }
228
229 static int handle_stfl(struct kvm_vcpu *vcpu)
230 {
231         unsigned int facility_list;
232         int rc;
233
234         vcpu->stat.instruction_stfl++;
235
236         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
237                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
238
239         /* only pass the facility bits that we can handle */
240         facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
241
242         rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
243                            &facility_list, sizeof(facility_list));
244         if (rc)
245                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
246         VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
247         trace_kvm_s390_handle_stfl(vcpu, facility_list);
248         return 0;
249 }
250
251 static void handle_new_psw(struct kvm_vcpu *vcpu)
252 {
253         /* Check whether the new psw is enabled for machine checks. */
254         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
255                 kvm_s390_deliver_pending_machine_checks(vcpu);
256 }
257
258 #define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
259 #define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
260 #define PSW_ADDR_24 0x0000000000ffffffUL
261 #define PSW_ADDR_31 0x000000007fffffffUL
262
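/*
 * Reject a PSW if an unassigned mask bit is set, if the address does not
 * fit the selected addressing mode (24-bit or 31-bit), or if the
 * extended-addressing bit is set without the basic-addressing bit.
 */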
263 static int is_valid_psw(psw_t *psw) {
264         if (psw->mask & PSW_MASK_UNASSIGNED)
265                 return 0;
266         if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
267                 if (psw->addr & ~PSW_ADDR_31)
268                         return 0;
269         }
270         if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
271                 return 0;
272         if ((psw->mask & PSW_MASK_ADDR_MODE) ==  PSW_MASK_EA)
273                 return 0;
274         return 1;
275 }
276
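/*
 * LPSW loads an 8-byte ESA/390-format PSW and expands it into the 16-byte
 * z/Architecture PSW; LPSWE below loads a full 16-byte PSW.  Both operands
 * must be doubleword aligned and must describe a valid PSW.
 */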
277 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
278 {
279         psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
280         psw_compat_t new_psw;
281         u64 addr;
282
283         if (gpsw->mask & PSW_MASK_PSTATE)
284                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
285
286         addr = kvm_s390_get_base_disp_s(vcpu);
287         if (addr & 7)
288                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
289         if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
290                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
291         if (!(new_psw.mask & PSW32_MASK_BASE))
292                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
293         gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
294         gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
295         gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
296         if (!is_valid_psw(gpsw))
297                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
298         handle_new_psw(vcpu);
299         return 0;
300 }
301
302 static int handle_lpswe(struct kvm_vcpu *vcpu)
303 {
304         psw_t new_psw;
305         u64 addr;
306
307         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
308                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
309
310         addr = kvm_s390_get_base_disp_s(vcpu);
311         if (addr & 7)
312                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
313         if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
314                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
315         vcpu->arch.sie_block->gpsw = new_psw;
316         if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
317                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
318         handle_new_psw(vcpu);
319         return 0;
320 }
321
322 static int handle_stidp(struct kvm_vcpu *vcpu)
323 {
324         u64 operand2;
325
326         vcpu->stat.instruction_stidp++;
327
328         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
329                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
330
331         operand2 = kvm_s390_get_base_disp_s(vcpu);
332
333         if (operand2 & 7)
334                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
335
336         if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
337                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
338
339         VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
340         return 0;
341 }
342
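/*
 * Build the SYSIB 3.2.2 response: count the guest's vcpus and insert KVM
 * as an additional level-3 hypervisor entry at index 0, shifting down any
 * entries returned by the host's own STSI.
 */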
343 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
344 {
345         struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
346         int cpus = 0;
347         int n;
348
349         spin_lock(&fi->lock);
350         for (n = 0; n < KVM_MAX_VCPUS; n++)
351                 if (fi->local_int[n])
352                         cpus++;
353         spin_unlock(&fi->lock);
354
355         /* deal with other level 3 hypervisors */
356         if (stsi(mem, 3, 2, 2))
357                 mem->count = 0;
358         if (mem->count < 8)
359                 mem->count++;
360         for (n = mem->count - 1; n > 0 ; n--)
361                 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
362
363         mem->vm[0].cpus_total = cpus;
364         mem->vm[0].cpus_configured = cpus;
365         mem->vm[0].cpus_standby = 0;
366         mem->vm[0].cpus_reserved = 0;
367         mem->vm[0].caf = 1000;
368         memcpy(mem->vm[0].name, "KVMguest", 8);
369         ASCEBC(mem->vm[0].name, 8);
370         memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
371         ASCEBC(mem->vm[0].cpi, 16);
372 }
373
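/*
 * STORE SYSTEM INFORMATION.  Function code 0 reports the highest supported
 * function code (3) with cc 0, fc > 3 sets cc 3, and fc 1 and 2 are
 * satisfied from the host's own STSI data.  The SYSIB operand must be on a
 * 4KB boundary.
 */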
374 static int handle_stsi(struct kvm_vcpu *vcpu)
375 {
376         int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
377         int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
378         int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
379         unsigned long mem = 0;
380         u64 operand2;
381         int rc = 0;
382
383         vcpu->stat.instruction_stsi++;
384         VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
385
386         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
387                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
388
389         if (fc > 3) {
390                 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;     /* cc 3 */
391                 return 0;
392         }
393
394         if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
395             || vcpu->run->s.regs.gprs[1] & 0xffff0000)
396                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
397
398         if (fc == 0) {
399                 vcpu->run->s.regs.gprs[0] = 3 << 28;
400                 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);  /* cc 0 */
401                 return 0;
402         }
403
404         operand2 = kvm_s390_get_base_disp_s(vcpu);
405
406         if (operand2 & 0xfff)
407                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
408
409         switch (fc) {
410         case 1: /* same handling for 1 and 2 */
411         case 2:
412                 mem = get_zeroed_page(GFP_KERNEL);
413                 if (!mem)
414                         goto out_no_data;
415                 if (stsi((void *) mem, fc, sel1, sel2))
416                         goto out_no_data;
417                 break;
418         case 3:
419                 if (sel1 != 2 || sel2 != 2)
420                         goto out_no_data;
421                 mem = get_zeroed_page(GFP_KERNEL);
422                 if (!mem)
423                         goto out_no_data;
424                 handle_stsi_3_2_2(vcpu, (void *) mem);
425                 break;
426         }
427
428         if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
429                 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
430                 goto out_exception;
431         }
432         trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
433         free_page(mem);
434         vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
435         vcpu->run->s.regs.gprs[0] = 0;
436         return 0;
437 out_no_data:
438         /* condition code 3 */
439         vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
440 out_exception:
441         free_page(mem);
442         return rc;
443 }
444
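/* Handlers for B2xx instructions, indexed by the second opcode byte. */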
445 static const intercept_handler_t b2_handlers[256] = {
446         [0x02] = handle_stidp,
447         [0x10] = handle_set_prefix,
448         [0x11] = handle_store_prefix,
449         [0x12] = handle_store_cpu_address,
450         [0x29] = handle_skey,
451         [0x2a] = handle_skey,
452         [0x2b] = handle_skey,
453         [0x30] = handle_io_inst,
454         [0x31] = handle_io_inst,
455         [0x32] = handle_io_inst,
456         [0x33] = handle_io_inst,
457         [0x34] = handle_io_inst,
458         [0x35] = handle_io_inst,
459         [0x36] = handle_io_inst,
460         [0x37] = handle_io_inst,
461         [0x38] = handle_io_inst,
462         [0x39] = handle_io_inst,
463         [0x3a] = handle_io_inst,
464         [0x3b] = handle_io_inst,
465         [0x3c] = handle_io_inst,
466         [0x5f] = handle_io_inst,
467         [0x74] = handle_io_inst,
468         [0x76] = handle_io_inst,
469         [0x7d] = handle_stsi,
470         [0xb1] = handle_stfl,
471         [0xb2] = handle_lpswe,
472 };
473
474 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
475 {
476         intercept_handler_t handler;
477
478         /*
479          * A lot of B2 instructions are privileged. Here we check for
480          * the privileged ones that we can handle in the kernel.
481          * Anything else goes to userspace.
482          */
483         handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
484         if (handler)
485                 return handler(vcpu);
486
487         return -EOPNOTSUPP;
488 }
489
490 static int handle_epsw(struct kvm_vcpu *vcpu)
491 {
492         int reg1, reg2;
493
494         kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
495
496         /* This basically extracts the mask half of the psw. */
497         vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
498         vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
499         if (reg2) {
500                 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
501                 vcpu->run->s.regs.gprs[reg2] |=
502                         vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
503         }
504         return 0;
505 }
506
507 #define PFMF_RESERVED   0xfffc0101UL
508 #define PFMF_SK         0x00020000UL
509 #define PFMF_CF         0x00010000UL
510 #define PFMF_UI         0x00008000UL
511 #define PFMF_FSC        0x00007000UL
512 #define PFMF_NQ         0x00000800UL
513 #define PFMF_MR         0x00000400UL
514 #define PFMF_MC         0x00000200UL
515 #define PFMF_KEY        0x000000feUL
516
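/*
 * PERFORM FRAME MANAGEMENT FUNCTION.  The first operand register holds the
 * control bits, the second the start address.  The operand range (4KB
 * frames, or 1MB frames with a frame-size code of 1) is walked page by page
 * through the guest mapping, clearing frames and/or setting storage keys as
 * requested.  EDAT2 (2GB frames) is not supported.
 */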
517 static int handle_pfmf(struct kvm_vcpu *vcpu)
518 {
519         int reg1, reg2;
520         unsigned long start, end;
521
522         vcpu->stat.instruction_pfmf++;
523
524         kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
525
526         if (!MACHINE_HAS_PFMF)
527                 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
528
529         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
530                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
531
532         if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
533                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
534
535         /* Only provide non-quiescing support if the host supports it */
536         if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
537                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
538
539         /* No support for conditional-SSKE */
540         if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
541                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
542
543         start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
544         switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
545         case 0x00000000:
546                 end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
547                 break;
548         case 0x00001000:
549                 end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
550                 break;
551         /* We don't support EDAT2
552         case 0x00002000:
553                 end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
554                 break;*/
555         default:
556                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
557         }
558         while (start < end) {
559                 unsigned long useraddr;
560
561                 useraddr = gmap_translate(start, vcpu->arch.gmap);
562                 if (IS_ERR((void *)useraddr))
563                         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
564
565                 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
566                         if (clear_user((void __user *)useraddr, PAGE_SIZE))
567                                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
568                 }
569
570                 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
571                         if (set_guest_storage_key(current->mm, useraddr,
572                                         vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
573                                         vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
574                                 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
575                 }
576
577                 start += PAGE_SIZE;
578         }
579         if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
580                 vcpu->run->s.regs.gprs[reg2] = end;
581         return 0;
582 }
583
584 static const intercept_handler_t b9_handlers[256] = {
585         [0x8d] = handle_epsw,
586         [0x9c] = handle_io_inst,
587         [0xaf] = handle_pfmf,
588 };
589
590 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
591 {
592         intercept_handler_t handler;
593
594         /* This is handled just as for the B2 instructions. */
595         handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
596         if (handler)
597                 return handler(vcpu);
598
599         return -EOPNOTSUPP;
600 }
601
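/*
 * LOAD CONTROL (32-bit): load the low halves of control registers reg1
 * through reg3 from consecutive words at the operand address, wrapping
 * from cr15 to cr0.
 */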
602 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
603 {
604         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
605         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
606         u64 useraddr;
607         u32 val = 0;
608         int reg, rc;
609
610         vcpu->stat.instruction_lctl++;
611
612         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
613                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
614
615         useraddr = kvm_s390_get_base_disp_rs(vcpu);
616
617         if (useraddr & 3)
618                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
619
620         VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
621                    useraddr);
622         trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
623
624         reg = reg1;
625         do {
626                 rc = get_guest(vcpu, val, (u32 __user *) useraddr);
627                 if (rc)
628                         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
629                 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
630                 vcpu->arch.sie_block->gcr[reg] |= val;
631                 useraddr += 4;
632                 if (reg == reg3)
633                         break;
634                 reg = (reg + 1) % 16;
635         } while (1);
636
637         return 0;
638 }
639
640 static int handle_lctlg(struct kvm_vcpu *vcpu)
641 {
642         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
643         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
644         u64 useraddr;
645         int reg, rc;
646
647         vcpu->stat.instruction_lctlg++;
648
649         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
650                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
651
652         useraddr = kvm_s390_get_base_disp_rsy(vcpu);
653
654         if (useraddr & 7)
655                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
656
657         reg = reg1;
658
659         VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
660                    useraddr);
661         trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
662
663         do {
664                 rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
665                                (u64 __user *) useraddr);
666                 if (rc)
667                         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
668                 useraddr += 8;
669                 if (reg == reg3)
670                         break;
671                 reg = (reg + 1) % 16;
672         } while (1);
673
674         return 0;
675 }
676
677 static const intercept_handler_t eb_handlers[256] = {
678         [0x2f] = handle_lctlg,
679         [0x8a] = handle_io_inst,
680 };
681
682 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
683 {
684         intercept_handler_t handler;
685
686         handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
687         if (handler)
688                 return handler(vcpu);
689         return -EOPNOTSUPP;
690 }
691
692 static int handle_tprot(struct kvm_vcpu *vcpu)
693 {
694         u64 address1, address2;
695         struct vm_area_struct *vma;
696         unsigned long user_address;
697
698         vcpu->stat.instruction_tprot++;
699
700         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
701                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
702
703         kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);
704
705         /* we only handle the Linux memory detection case:
706          * access key == 0
707          * guest DAT == off
708          * everything else goes to userspace. */
709         if (address2 & 0xf0)
710                 return -EOPNOTSUPP;
711         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
712                 return -EOPNOTSUPP;
713
714         down_read(&current->mm->mmap_sem);
715         user_address = __gmap_translate(address1, vcpu->arch.gmap);
716         if (IS_ERR_VALUE(user_address))
717                 goto out_inject;
718         vma = find_vma(current->mm, user_address);
719         if (!vma)
720                 goto out_inject;
721         vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
722         if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
723                 vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
724         if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
725                 vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
726
727         up_read(&current->mm->mmap_sem);
728         return 0;
729
730 out_inject:
731         up_read(&current->mm->mmap_sem);
732         return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
733 }
734
735 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
736 {
737         /* For e5xx... instructions we only handle TPROT */
738         if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
739                 return handle_tprot(vcpu);
740         return -EOPNOTSUPP;
741 }
742
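/*
 * SET CLOCK PROGRAMMABLE FIELD: bits 48-63 of gr0 become the TOD
 * programmable field; bits 32-47 must be zero or a specification
 * exception is injected.
 */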
743 static int handle_sckpf(struct kvm_vcpu *vcpu)
744 {
745         u32 value;
746
747         if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
748                 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
749
750         if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
751                 return kvm_s390_inject_program_int(vcpu,
752                                                    PGM_SPECIFICATION);
753
754         value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
755         vcpu->arch.sie_block->todpr = value;
756
757         return 0;
758 }
759
760 static const intercept_handler_t x01_handlers[256] = {
761         [0x07] = handle_sckpf,
762 };
763
764 int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
765 {
766         intercept_handler_t handler;
767
768         handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
769         if (handler)
770                 return handler(vcpu);
771         return -EOPNOTSUPP;
772 }