KVM: s390: fix return code handling in lpsw/lpswe handlers
arch/s390/kvm/priv.c (firefly-linux-kernel-4.4.55.git)
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

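/*
 * SET PREFIX (SPX): pick up a new 8K-aligned prefix from the second
 * operand and make sure both pages of the new prefix area are
 * accessible before switching over.
 */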
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        /* get the value */
        if (get_guest(vcpu, address, (u32 __user *) operand2)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
        return 0;
}

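/*
 * STORE PREFIX (STPX): store the current prefix register at the
 * word-aligned second-operand address.
 */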
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* get the value */
        if (put_guest(vcpu, address, (u32 __user *)operand2)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
        return 0;
}

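/*
 * STORE CPU ADDRESS (STAP): store the 16-bit CPU address, i.e. the
 * vcpu id, at the halfword-aligned operand address.
 */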
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;
        int rc;

        vcpu->stat.instruction_stap++;

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr);
        if (rc) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
        return 0;
}

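/*
 * Storage key instruction intercepted: rewind the PSW by the
 * instruction length (4 bytes) so that the guest retries the
 * operation.
 */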
static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;
        vcpu->arch.sie_block->gpsw.addr -= 4;
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

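/*
 * TEST PENDING INTERRUPTION (TPI): dequeue an I/O interrupt matching
 * the isc mask in cr6, store its interruption code either at the
 * operand address or in the lowcore, and report via the condition
 * code whether an interruption was pending.
 */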
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        u64 addr;
        int cc;

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }
        cc = 0;
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
        if (!inti)
                goto no_interrupt;
        cc = 1;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
                put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
        }
        kfree(inti);
no_interrupt:
        /* Set condition code and we're done. */
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
out:
        return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
                return 0;
        }
}

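/*
 * STORE FACILITY LIST (STFL): copy the host facility bits that we can
 * emulate into the guest's lowcore.
 */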
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        unsigned int facility_list;
        int rc;

        vcpu->stat.instruction_stfl++;
        /* only pass the facility bits, which we can handle */
        facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           &facility_list, sizeof(facility_list));
        if (rc)
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        else {
                VCPU_EVENT(vcpu, 5, "store facility list value %x",
                           facility_list);
                trace_kvm_s390_handle_stfl(vcpu, facility_list);
        }
        return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

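/*
 * LOAD PSW (LPSW): load an ESA/390-format PSW from the doubleword-
 * aligned operand address, convert it to the z/Architecture format and
 * check it for validity. Any nonzero result of the injection helpers
 * is passed back to the caller.
 */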
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_compat_t new_psw;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        vcpu->arch.sie_block->gpsw.mask =
                (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        vcpu->arch.sie_block->gpsw.mask |= new_psw.addr & PSW32_ADDR_AMODE;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr & ~PSW32_ADDR_AMODE;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        handle_new_psw(vcpu);
        return 0;
}

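/*
 * LOAD PSW EXTENDED (LPSWE): load a full z/Architecture PSW from the
 * doubleword-aligned operand address and check it for validity.
 */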
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_t new_psw;

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
              PSW_MASK_BA) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        handle_new_psw(vcpu);
        return 0;
}

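/*
 * STORE CPU ID (STIDP): store the 8-byte CPU identification at the
 * doubleword-aligned operand address.
 */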
static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        int rc;

        vcpu->stat.instruction_stidp++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2);
        if (rc) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
        return 0;
}

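/*
 * Fill in the STSI 3.2.2 block: count the online vcpus, shift down any
 * entries reported by an underlying hypervisor and insert KVM as the
 * first virtual-machine description.
 */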
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

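/*
 * STORE SYSTEM INFORMATION (STSI): emulate the function codes we know
 * about; any other function code sets condition code 3.
 */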
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        u64 operand2;
        unsigned long mem;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff && fc > 0)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 0:
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                return 0;
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_mem;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_fail;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        default:
                goto out_fail;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_mem;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_mem:
        free_page(mem);
out_fail:
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
        return 0;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. We first check for
         * the privileged ones that we can handle in the kernel. If the
         * kernel can handle this instruction, we check for the problem
         * state bit and (a) handle the instruction or (b) send a code 2
         * program check.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                        return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

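/*
 * EXTRACT PSW (EPSW): store the upper half of the PSW mask into the
 * first register and, if a second register is given, the lower half
 * into that one. EPSW is an RRE-format instruction, so the register
 * numbers sit in bits 8-15 of ipb; r1 therefore needs a shift of 20,
 * not 24.
 */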
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
        }
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
        [0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if ((handler != handle_epsw) &&
                    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
                        return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}


static const intercept_handler_t eb_handlers[256] = {
        [0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* All eb instructions that end up here are privileged. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        down_read(&current->mm->mmap_sem);
        user_address = __gmap_translate(address1, vcpu->arch.gmap);
        if (IS_ERR_VALUE(user_address))
                goto out_inject;
        vma = find_vma(current->mm, user_address);
        if (!vma)
                goto out_inject;
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;

out_inject:
        up_read(&current->mm->mmap_sem);
        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

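/*
 * SET CLOCK PROGRAMMABLE FIELD (SCKPF): set the guest's TOD
 * programmable register from the low 16 bits of gpr 0.
 */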
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}