/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

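/*
 * The handlers below run when SIE intercepts one of the (mostly
 * privileged) instructions dispatched at the bottom of this file.
 * Returning 0 means the instruction was handled in the kernel
 * (possibly by injecting a program check into the guest); a negative
 * value such as -EOPNOTSUPP or -EREMOTE hands the instruction to
 * userspace instead.
 */
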
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        /* get the value */
        if (get_guest_u32(vcpu, operand2, &address)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
        return 0;
}

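/*
 * A note on the masking above: the prefix designates an 8KB block
 * (two 4KB pages, which is why both pages are probed) that is
 * exchanged with absolute storage at address 0 for this CPU, so
 * 0x7fffe000u keeps the value 8KB aligned and below 2GB as the SIE
 * block expects.
 */
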
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        if (put_guest_u32(vcpu, operand2, address)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;
        int rc;

        vcpu->stat.instruction_stap++;

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        /* must be halfword aligned */
        if (useraddr & 1) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
        if (rc == -EFAULT) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
        return 0;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;
        vcpu->arch.sie_block->gpsw.addr -= 4;
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

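/*
 * iske, sske and rrbe are all 4 bytes long; backing the PSW address up
 * by 4 makes the guest re-execute the storage key instruction once the
 * condition that caused the interception has been dealt with.
 */
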
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        u64 addr;
        struct kvm_s390_interrupt_info *inti;
        int cc;

        addr = kvm_s390_get_base_disp_s(vcpu);

        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
        if (inti) {
                if (addr) {
                        /*
                         * Store the two-word I/O interruption code into the
                         * provided area.
                         */
                        put_guest_u16(vcpu, addr, inti->io.subchannel_id);
                        put_guest_u16(vcpu, addr + 2, inti->io.subchannel_nr);
                        put_guest_u32(vcpu, addr + 4, inti->io.io_int_parm);
                } else {
                        /*
                         * Store the three-word I/O interruption code into
                         * the appropriate lowcore area.
                         */
                        put_guest_u16(vcpu, 184, inti->io.subchannel_id);
                        put_guest_u16(vcpu, 186, inti->io.subchannel_nr);
                        put_guest_u32(vcpu, 188, inti->io.io_int_parm);
                        put_guest_u32(vcpu, 192, inti->io.io_int_word);
                }
                cc = 1;
        } else
                cc = 0;
        kfree(inti);
        /* Set condition code and we're done. */
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
        return 0;
}

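/*
 * The condition code lives in bits 18-19 of the 64-bit PSW mask
 * (counted from the MSB), i.e. at shift 44 from the LSB, hence the
 * pattern used above and throughout this file:
 *
 *      gpsw.mask &= ~(3ul << 44);      - clear the current cc
 *      gpsw.mask |= (cc & 3ul) << 44;  - or in the new cc
 */
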
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

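/*
 * Returning -EREMOTE above forces an exit to userspace with
 * exit_reason KVM_EXIT_S390_TSCH.  A rough sketch of the consuming
 * side, assuming a hypothetical emulate_tsch() helper (not part of
 * any real API):
 *
 *      switch (run->exit_reason) {
 *      case KVM_EXIT_S390_TSCH:
 *              emulate_tsch(run->s390_tsch.subchannel_id,
 *                           run->s390_tsch.subchannel_nr,
 *                           run->s390_tsch.io_int_parm,
 *                           run->s390_tsch.io_int_word,
 *                           run->s390_tsch.dequeued,
 *                           run->s390_tsch.ipb);
 *              break;
 *      }
 */
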
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
                return 0;
        }
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
        unsigned int facility_list;
        int rc;

        vcpu->stat.instruction_stfl++;
        /* only pass the facility bits, which we can handle */
        facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           &facility_list, sizeof(facility_list));
        if (rc == -EFAULT)
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        else {
                VCPU_EVENT(vcpu, 5, "store facility list value %x",
                           facility_list);
                trace_kvm_s390_handle_stfl(vcpu, facility_list);
        }
        return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x00000000000fffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

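/*
 * These masks encode the PSW validity rules enforced below:
 * PSW_MASK_UNASSIGNED covers mask bits that must be zero, and the
 * EA/BA address-mode bits determine how many address bits may be in
 * use (EA=0/BA=0: 24 bit, EA=0/BA=1: 31 bit, EA=1/BA=1: 64 bit;
 * EA=1/BA=0 is invalid).
 */
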
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_compat_t new_psw;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        addr = kvm_s390_get_base_disp_s(vcpu);

        /* must be doubleword aligned */
        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        if (!(new_psw.mask & PSW32_MASK_BASE)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask =
                (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

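/*
 * lpsw above loads an 8-byte ESA/390-format PSW (psw_compat_t) and
 * widens it to the 16-byte format kept in the SIE block; lpswe below
 * loads a full 16-byte PSW directly.
 */
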
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_t new_psw;

        addr = kvm_s390_get_base_disp_s(vcpu);

        /* must be doubleword aligned */
        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
              PSW_MASK_BA) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        int rc;

        vcpu->stat.instruction_stidp++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be doubleword aligned */
        if (operand2 & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
        if (rc == -EFAULT) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
        return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

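/*
 * handle_stsi_3_2_2() above adds KVM as one more level-3 hypervisor to
 * the SYSIB 3.2.2 block: entries reported by any hypervisor underneath
 * us are shifted up one slot and slot 0 is filled in for this KVM
 * instance, with name and control program identifier converted to
 * EBCDIC as the architecture requires.
 */
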
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        u64 operand2;
        unsigned long mem;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff && fc > 0)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 0:
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                return 0;
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_mem;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_fail;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        default:
                goto out_fail;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_mem;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_mem:
        free_page(mem);
out_fail:
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
        return 0;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

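/*
 * The table index is the second opcode byte, so [0x02] = handle_stidp
 * serves stidp (opcode 0xb202) and [0xb2] = handle_lpswe serves lpswe
 * (opcode 0xb2b2).
 */
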
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. We first check for
         * the privileged ones, that we can handle in the kernel. If the
         * kernel can handle this instruction, we check for the problem
         * state bit and (a) handle the instruction or (b) send a code 2
         * program check.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                        return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        /* Extract the two register nibbles from the instruction's ipb. */
        reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
        }
        return 0;
}

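/*
 * epsw is the one instruction handled here that may be executed in
 * problem state, so the b9 dispatcher below skips the privilege check
 * for it.
 */
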
static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
        [0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if ((handler != handle_epsw) &&
                    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
                        return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* All eb instructions that end up here are privileged. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /* we only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * everything else goes to userspace. */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        /* we must resolve the address without holding the mmap semaphore.
         * This is ok since the userspace hypervisor is not supposed to change
         * the mapping while the guest queries the memory. Otherwise the guest
         * might crash or get wrong info anyway. */
        user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, user_address);
        if (!vma) {
                up_read(&current->mm->mmap_sem);
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;
}

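/*
 * The condition codes set above mirror what tprot would return for the
 * key-0, DAT-off case: cc 0 - fetch and store permitted, cc 1 - fetch
 * permitted but store not, cc 2 - neither fetch nor store permitted,
 * derived here from the protection bits of the backing VMA.
 */
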
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

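/*
 * sckpf copies bits 48-63 of general register 0 into the TOD
 * programmable register; bits 32-47 must be zero, which is what the
 * specification check above enforces.
 */
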
static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}