2 * hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008, 2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/switch_to.h>
35 #define CREATE_TRACE_POINTS
37 #include "trace-s390.h"
39 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/*
 * Per-vcpu statistics exported through debugfs.  Each entry maps a
 * debugfs file name to the offset of the corresponding counter inside
 * struct kvm_vcpu (computed by VCPU_STAT above).
 * NOTE(review): the terminating sentinel entry and the closing "};" of
 * this array are not visible in this chunk of the file.
 */
41 struct kvm_stats_debugfs_item debugfs_entries[] = {
42 { "userspace_handled", VCPU_STAT(exit_userspace) },
43 { "exit_null", VCPU_STAT(exit_null) },
44 { "exit_validity", VCPU_STAT(exit_validity) },
45 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
46 { "exit_external_request", VCPU_STAT(exit_external_request) },
47 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
48 { "exit_instruction", VCPU_STAT(exit_instruction) },
49 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
50 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
51 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
52 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
53 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
54 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
55 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
56 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
57 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
58 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
59 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
60 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
61 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
62 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
63 { "instruction_spx", VCPU_STAT(instruction_spx) },
64 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
65 { "instruction_stap", VCPU_STAT(instruction_stap) },
66 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
67 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
68 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
69 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
70 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
71 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
72 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
73 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
74 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
75 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
76 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
77 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
78 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
79 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
80 { "diagnose_10", VCPU_STAT(diagnose_10) },
81 { "diagnose_44", VCPU_STAT(diagnose_44) },
82 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
/*
 * Page-sized bitmap of the STFLE facility bits that KVM exposes to
 * guests; allocated and masked in kvm_s390_init() at the bottom of
 * this file.
 */
86 static unsigned long long *facilities;
88 /* Section: not file related */
/*
 * The hardware enable/disable/setup and processor-compat hooks are
 * effectively no-ops on s390: SIE virtualization support is always
 * present.
 * NOTE(review): the bodies/braces/returns of these stubs are missing
 * from this chunk of the file.
 */
89 int kvm_arch_hardware_enable(void *garbage)
91 /* every s390 is virtualization enabled ;-) */
95 void kvm_arch_hardware_disable(void *garbage)
99 int kvm_arch_hardware_setup(void)
104 void kvm_arch_hardware_unsetup(void)
108 void kvm_arch_check_processor_compat(void *rtn)
112 int kvm_arch_init(void *opaque)
117 void kvm_arch_exit(void)
121 /* Section: device related */
/*
 * /dev/kvm device ioctl: the only device-level ioctl implemented on
 * s390 is KVM_S390_ENABLE_SIE, which switches the calling process'
 * address space into SIE-capable mode.
 */
122 long kvm_arch_dev_ioctl(struct file *filp,
123 unsigned int ioctl, unsigned long arg)
125 if (ioctl == KVM_S390_ENABLE_SIE)
126 return s390_enable_sie();
/*
 * Report which optional KVM capabilities this architecture supports.
 * NOTE(review): the switch framing, the result variable handling and
 * the function braces/returns are missing from this chunk; only the
 * case labels survive.
 */
130 int kvm_dev_ioctl_check_extension(long ext)
135 case KVM_CAP_S390_PSW:
136 case KVM_CAP_S390_GMAP:
137 case KVM_CAP_SYNC_MMU:
138 #ifdef CONFIG_KVM_S390_UCONTROL
139 case KVM_CAP_S390_UCONTROL:
141 case KVM_CAP_SYNC_REGS:
142 case KVM_CAP_ONE_REG:
143 case KVM_CAP_ENABLE_CAP:
144 case KVM_CAP_S390_CSS_SUPPORT:
145 case KVM_CAP_IOEVENTFD:
148 case KVM_CAP_NR_VCPUS:
149 case KVM_CAP_MAX_VCPUS:
152 case KVM_CAP_S390_COW:
/* copy-on-write support is reported based on the ESOP facility */
153 r = MACHINE_HAS_ESOP;
161 /* Section: vm related */
163 * Get (and clear) the dirty memory log for a memory slot.
165 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
166 struct kvm_dirty_log *log)
/*
 * VM-wide ioctl handler.  The only case visible in this chunk is
 * KVM_S390_INTERRUPT: copy a struct kvm_s390_interrupt from userspace
 * and inject it as a floating interrupt into the VM.
 * NOTE(review): the switch framing, error paths and return are missing
 * from this chunk.
 */
171 long kvm_arch_vm_ioctl(struct file *filp,
172 unsigned int ioctl, unsigned long arg)
174 struct kvm *kvm = filp->private_data;
175 void __user *argp = (void __user *)arg;
179 case KVM_S390_INTERRUPT: {
180 struct kvm_s390_interrupt s390int;
183 if (copy_from_user(&s390int, argp, sizeof(s390int)))
185 r = kvm_s390_inject_vm(kvm, &s390int);
/*
 * Create a new VM: validate the requested VM type (user-controlled VMs
 * need CAP_SYS_ADMIN), enable SIE for the host mm, allocate the system
 * control area (SCA) and the s390 debug-feature log, initialize the
 * floating-interrupt list, and set up the guest address space (gmap) —
 * user-controlled VMs get per-vcpu gmaps later instead of a VM-wide one.
 * NOTE(review): error-path labels and several lines of the unwind
 * sequence are missing from this chunk.
 */
195 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
201 #ifdef CONFIG_KVM_S390_UCONTROL
202 if (type & ~KVM_VM_S390_UCONTROL)
204 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
211 rc = s390_enable_sie();
217 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
221 sprintf(debug_name, "kvm-%u", current->pid);
223 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
227 spin_lock_init(&kvm->arch.float_int.lock);
228 INIT_LIST_HEAD(&kvm->arch.float_int.list);
230 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
231 VM_EVENT(kvm, 3, "%s", "vm created");
233 if (type & KVM_VM_S390_UCONTROL) {
234 kvm->arch.gmap = NULL;
236 kvm->arch.gmap = gmap_alloc(current->mm);
241 kvm->arch.css_support = 0;
/* error unwind: release the debug feature and the SCA page */
245 debug_unregister(kvm->arch.dbf);
247 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: detach it from the SCA (skipped for
 * user-controlled VMs, which have no SCA entry), free the per-vcpu
 * gmap in the ucontrol case, release the SIE control block page and
 * run the generic vcpu uninit.
 */
252 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
254 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
255 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
256 if (!kvm_is_ucontrol(vcpu->kvm)) {
/* the MCN bitmap is numbered from the MSB, hence "63 - id" */
257 clear_bit(63 - vcpu->vcpu_id,
258 (unsigned long *) &vcpu->kvm->arch.sca->mcn)
259 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
260 (__u64) vcpu->arch.sie_block)
261 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
265 if (kvm_is_ucontrol(vcpu->kvm))
266 gmap_free(vcpu->arch.gmap);
268 free_page((unsigned long)(vcpu->arch.sie_block));
269 kvm_vcpu_uninit(vcpu);
/*
 * Destroy every vcpu of the VM, then clear the vcpu array and the
 * online counter under kvm->lock so concurrent lookups see a
 * consistent (empty) state.
 */
273 static void kvm_free_vcpus(struct kvm *kvm)
276 struct kvm_vcpu *vcpu;
278 kvm_for_each_vcpu(i, vcpu, kvm)
279 kvm_arch_vcpu_destroy(vcpu);
281 mutex_lock(&kvm->lock);
282 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
283 kvm->vcpus[i] = NULL;
285 atomic_set(&kvm->online_vcpus, 0);
286 mutex_unlock(&kvm->lock);
/* No arch-specific event syncing needed on s390. */
289 void kvm_arch_sync_events(struct kvm *kvm)
/*
 * VM teardown: free the SCA page, unregister the debug feature and —
 * for regular (non-ucontrol) VMs — free the VM-wide gmap.
 * NOTE(review): the call that frees the vcpus is not visible in this
 * chunk of the file.
 */
293 void kvm_arch_destroy_vm(struct kvm *kvm)
296 free_page((unsigned long)(kvm->arch.sca));
297 debug_unregister(kvm->arch.dbf);
298 if (!kvm_is_ucontrol(kvm))
299 gmap_free(kvm->arch.gmap);
302 /* Section: vcpu related */
/*
 * Per-vcpu init: user-controlled VMs get a private address space
 * (gmap) per vcpu, regular VMs share the VM-wide gmap.  Also declares
 * which register sets are synced through the kvm_run area.
 */
303 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
305 if (kvm_is_ucontrol(vcpu->kvm)) {
306 vcpu->arch.gmap = gmap_alloc(current->mm);
307 if (!vcpu->arch.gmap)
312 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
/* NOTE(review): the remainder of the KVM_SYNC_* flag list is missing
 * from this chunk. */
313 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
320 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/*
 * Scheduled-in hook: lazily switch FP and access registers from host
 * to guest, enable the guest address space on this cpu, and flag the
 * vcpu as running in the SIE control block.
 */
325 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
327 save_fp_regs(&vcpu->arch.host_fpregs);
328 save_access_regs(vcpu->arch.host_acrs);
/* mask out fpc bits the guest must not see/set */
329 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
330 restore_fp_regs(&vcpu->arch.guest_fpregs);
331 restore_access_regs(vcpu->run->s.regs.acrs);
332 gmap_enable(vcpu->arch.gmap);
333 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
/*
 * Scheduled-out hook: exact inverse of kvm_arch_vcpu_load — stash the
 * guest FP/access registers and restore the host's.
 */
336 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
338 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
339 gmap_disable(vcpu->arch.gmap);
340 save_fp_regs(&vcpu->arch.guest_fpregs);
341 save_access_regs(vcpu->run->s.regs.acrs);
342 restore_fp_regs(&vcpu->arch.host_fpregs);
343 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Put the vcpu into its architected initial-CPU-reset state: zeroed
 * PSW, prefix, timers and control registers (except the architected
 * reset values written into cr0/cr14 below — presumably the POP-defined
 * defaults; confirm against the Principles of Operation), cleared FPC,
 * and the CPU marked stopped.
 */
346 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
348 /* this equals initial cpu reset in pop, but we don't switch to ESA */
349 vcpu->arch.sie_block->gpsw.mask = 0UL;
350 vcpu->arch.sie_block->gpsw.addr = 0UL;
351 kvm_s390_set_prefix(vcpu, 0);
352 vcpu->arch.sie_block->cputm = 0UL;
353 vcpu->arch.sie_block->ckc = 0UL;
354 vcpu->arch.sie_block->todpr = 0;
355 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
356 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
357 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
358 vcpu->arch.guest_fpregs.fpc = 0;
/* load the cleared fpc into the real FP control register as well */
359 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
360 vcpu->arch.sie_block->gbea = 1;
361 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
/* Nothing to do after vcpu creation on s390. */
364 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
/*
 * One-time SIE control block setup: initial cpuflags, execution
 * controls (ecb/eca), pointer to the KVM facility list, the hrtimer
 * used to wake the vcpu on clock-comparator expiry, and the reported
 * CPU id (version forced to 0xff).
 */
369 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
371 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
374 vcpu->arch.sie_block->ecb = 6;
375 vcpu->arch.sie_block->eca = 0xC1002001U;
376 vcpu->arch.sie_block->fac = (int) (long) facilities;
377 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
378 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
379 (unsigned long) vcpu);
380 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
381 get_cpu_id(&vcpu->arch.cpu_id);
382 vcpu->arch.cpu_id.version = 0xff;
/*
 * Allocate and wire up a new vcpu: kzalloc the vcpu struct, get a
 * zeroed page for the SIE control block, register the vcpu in the SCA
 * (regular VMs only), initialize the local interrupt structures and
 * hook them into the VM's floating-interrupt bookkeeping, then run the
 * generic kvm_vcpu_init.
 * NOTE(review): several error-path labels/returns are missing from
 * this chunk; only the sie-block free on the failure path is visible.
 */
386 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
389 struct kvm_vcpu *vcpu;
392 if (id >= KVM_MAX_VCPUS)
397 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
401 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
402 get_zeroed_page(GFP_KERNEL);
404 if (!vcpu->arch.sie_block)
407 vcpu->arch.sie_block->icpua = id;
408 if (!kvm_is_ucontrol(kvm)) {
409 if (!kvm->arch.sca) {
413 if (!kvm->arch.sca->cpu[id].sda)
414 kvm->arch.sca->cpu[id].sda =
415 (__u64) vcpu->arch.sie_block;
/* split the 64-bit SCA address into the two 32-bit SIE fields */
416 vcpu->arch.sie_block->scaoh =
417 (__u32)(((__u64)kvm->arch.sca) >> 32);
418 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
419 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
422 spin_lock_init(&vcpu->arch.local_int.lock);
423 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
424 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
425 spin_lock(&kvm->arch.float_int.lock);
426 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
427 init_waitqueue_head(&vcpu->arch.local_int.wq);
428 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
429 spin_unlock(&kvm->arch.float_int.lock);
431 rc = kvm_vcpu_init(vcpu, kvm, id);
433 goto out_free_sie_block;
434 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
435 vcpu->arch.sie_block);
436 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
440 free_page((unsigned long)(vcpu->arch.sie_block));
/* Both hooks are referenced by generic KVM code but unused on s390. */
447 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
449 /* kvm common code refers to this, but never calls it */
454 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
456 /* kvm common code refers to this, but never calls it */
/*
 * KVM_GET_ONE_REG backend: copy a single SIE-block register (TOD
 * programmable register, epoch difference, CPU timer or clock
 * comparator) out to the user address in reg->addr.
 * NOTE(review): the switch framing, default case and return are
 * missing from this chunk.
 */
461 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
462 struct kvm_one_reg *reg)
467 case KVM_REG_S390_TODPR:
468 r = put_user(vcpu->arch.sie_block->todpr,
469 (u32 __user *)reg->addr);
471 case KVM_REG_S390_EPOCHDIFF:
472 r = put_user(vcpu->arch.sie_block->epoch,
473 (u64 __user *)reg->addr);
475 case KVM_REG_S390_CPU_TIMER:
476 r = put_user(vcpu->arch.sie_block->cputm,
477 (u64 __user *)reg->addr);
479 case KVM_REG_S390_CLOCK_COMP:
480 r = put_user(vcpu->arch.sie_block->ckc,
481 (u64 __user *)reg->addr);
/*
 * KVM_SET_ONE_REG backend: mirror image of the get variant above —
 * read a single register value from userspace into the SIE block.
 * NOTE(review): the switch framing, default case and return are
 * missing from this chunk.
 */
490 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
491 struct kvm_one_reg *reg)
496 case KVM_REG_S390_TODPR:
497 r = get_user(vcpu->arch.sie_block->todpr,
498 (u32 __user *)reg->addr);
500 case KVM_REG_S390_EPOCHDIFF:
501 r = get_user(vcpu->arch.sie_block->epoch,
502 (u64 __user *)reg->addr);
504 case KVM_REG_S390_CPU_TIMER:
505 r = get_user(vcpu->arch.sie_block->cputm,
506 (u64 __user *)reg->addr);
508 case KVM_REG_S390_CLOCK_COMP:
509 r = get_user(vcpu->arch.sie_block->ckc,
510 (u64 __user *)reg->addr);
/* KVM_S390_INITIAL_RESET backend: delegate to the reset helper above. */
519 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
521 kvm_s390_vcpu_initial_reset(vcpu);
525 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
527 memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs));
531 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
533 memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
/*
 * KVM_SET_SREGS backend: install guest access and control registers;
 * the access registers are loaded into hardware immediately since the
 * vcpu thread carries them while loaded.
 */
537 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
538 struct kvm_sregs *sregs)
540 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
541 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
542 restore_access_regs(vcpu->run->s.regs.acrs);
/* KVM_GET_SREGS backend: read back access and control registers. */
546 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
547 struct kvm_sregs *sregs)
549 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
550 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/*
 * KVM_SET_FPU backend: install guest floating point registers; the fpc
 * is masked to valid bits and the registers loaded into hardware.
 */
554 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
556 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
557 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
558 restore_fp_regs(&vcpu->arch.guest_fpregs);
/* KVM_GET_FPU backend: read back guest floating point state. */
562 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
564 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
565 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/*
 * KVM_S390_SET_INITIAL_PSW backend: only allowed while the vcpu is
 * stopped; stores the new PSW into the synced kvm_run area.
 */
569 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
573 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
576 vcpu->run->psw_mask = psw.mask;
577 vcpu->run->psw_addr = psw.addr;
/* The following four ioctl backends are not implemented on s390. */
582 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
583 struct kvm_translation *tr)
585 return -EINVAL; /* not implemented yet */
588 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
589 struct kvm_guest_debug *dbg)
591 return -EINVAL; /* not implemented yet */
594 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
595 struct kvm_mp_state *mp_state)
597 return -EINVAL; /* not implemented yet */
600 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
601 struct kvm_mp_state *mp_state)
603 return -EINVAL; /* not implemented yet */
606 static int __vcpu_run(struct kvm_vcpu *vcpu)
610 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
615 if (test_thread_flag(TIF_MCCK_PENDING))
618 if (!kvm_is_ucontrol(vcpu->kvm))
619 kvm_s390_deliver_pending_interrupts(vcpu);
621 vcpu->arch.sie_block->icptcode = 0;
625 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
626 atomic_read(&vcpu->arch.sie_block->cpuflags));
627 trace_kvm_s390_sie_enter(vcpu,
628 atomic_read(&vcpu->arch.sie_block->cpuflags));
629 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
631 if (kvm_is_ucontrol(vcpu->kvm)) {
632 rc = SIE_INTERCEPT_UCONTROL;
634 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
635 trace_kvm_s390_sie_fault(vcpu);
636 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
639 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
640 vcpu->arch.sie_block->icptcode);
641 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
644 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
/*
 * KVM_RUN backend: install the caller's signal mask, sync PSW and any
 * dirty prefix/control registers from kvm_run into the SIE block, then
 * loop __vcpu_run + intercept handling until a signal arrives or an
 * intercept needs userspace.  On the way out, the exit reason and the
 * current register state are written back into kvm_run.
 * NOTE(review): several lines (loop braces, rerun label, some
 * assignments) are missing from this chunk.
 */
648 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
654 if (vcpu->sigset_active)
655 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
657 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
659 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
661 switch (kvm_run->exit_reason) {
662 case KVM_EXIT_S390_SIEIC:
663 case KVM_EXIT_UNKNOWN:
665 case KVM_EXIT_S390_RESET:
666 case KVM_EXIT_S390_UCONTROL:
667 case KVM_EXIT_S390_TSCH:
/* sync in the state userspace may have modified since the last exit */
673 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
674 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
675 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
676 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
677 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
679 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
680 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
681 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
682 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
688 rc = __vcpu_run(vcpu);
691 if (kvm_is_ucontrol(vcpu->kvm))
694 rc = kvm_handle_sie_intercept(vcpu);
695 } while (!signal_pending(current) && !rc);
697 if (rc == SIE_INTERCEPT_RERUNVCPU)
700 if (signal_pending(current) && !rc) {
701 kvm_run->exit_reason = KVM_EXIT_INTR;
705 #ifdef CONFIG_KVM_S390_UCONTROL
/* user-controlled VM: report the faulting guest address to userspace */
706 if (rc == SIE_INTERCEPT_UCONTROL) {
707 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
708 kvm_run->s390_ucontrol.trans_exc_code =
709 current->thread.gmap_addr;
710 kvm_run->s390_ucontrol.pgm_code = 0x10;
715 if (rc == -EOPNOTSUPP) {
716 /* intercept cannot be handled in-kernel, prepare kvm-run */
717 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
718 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
719 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
720 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
724 if (rc == -EREMOTE) {
725 /* intercept was handled, but userspace support is needed
726 * kvm_run has been prepared by the handler */
/* sync current register state back out to userspace */
730 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
731 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
732 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
733 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
735 if (vcpu->sigset_active)
736 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
738 vcpu->stat.exit_userspace++;
/*
 * Copy @n bytes at @from into guest memory at @guestdest, using
 * prefix-relative (virtual) addressing when @prefix is set and
 * absolute addressing otherwise.
 */
742 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
743 unsigned long n, int prefix)
746 return copy_to_guest(vcpu, guestdest, from, n);
748 return copy_to_guest_absolute(vcpu, guestdest, from, n);
752 * store status at address
753 * we use have two special cases:
754 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
755 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/*
 * Write the vcpu's architected save area (FP regs, GP regs, PSW,
 * prefix, fpc, TOD programmable reg, CPU timer, clock comparator,
 * access and control regs) into guest memory at @addr, resolving the
 * two special address constants first.  Byte 163 is set to 1 to flag
 * z/Architecture mode for the special-address cases.
 */
757 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
759 unsigned char archmode = 1;
762 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
763 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
765 addr = SAVE_AREA_BASE;
767 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
768 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
770 addr = SAVE_AREA_BASE;
776 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
777 * copying in vcpu load/put. Lets update our copies before we save
778 * it into the save area
780 save_fp_regs(&vcpu->arch.guest_fpregs);
781 save_access_regs(vcpu->run->s.regs.acrs);
783 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
784 vcpu->arch.guest_fpregs.fprs, 128, prefix))
787 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
788 vcpu->run->s.regs.gprs, 128, prefix))
791 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
792 &vcpu->arch.sie_block->gpsw, 16, prefix))
795 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
796 &vcpu->arch.sie_block->prefix, 4, prefix))
799 if (__guestcopy(vcpu,
800 addr + offsetof(struct save_area, fp_ctrl_reg),
801 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
804 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
805 &vcpu->arch.sie_block->todpr, 4, prefix))
808 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
809 &vcpu->arch.sie_block->cputm, 8, prefix))
812 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
813 &vcpu->arch.sie_block->ckc, 8, prefix))
816 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
817 &vcpu->run->s.regs.acrs, 64, prefix))
820 if (__guestcopy(vcpu,
821 addr + offsetof(struct save_area, ctrl_regs),
822 &vcpu->arch.sie_block->gcr, 128, prefix))
/*
 * KVM_ENABLE_CAP backend: the only capability handled here is
 * KVM_CAP_S390_CSS_SUPPORT, which turns on in-kernel channel subsystem
 * support for the whole VM (first enable wins).
 * NOTE(review): the switch framing, validation and return paths are
 * missing from this chunk.
 */
827 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
828 struct kvm_enable_cap *cap)
836 case KVM_CAP_S390_CSS_SUPPORT:
837 if (!vcpu->kvm->arch.css_support) {
838 vcpu->kvm->arch.css_support = 1;
839 trace_kvm_s390_enable_css(vcpu->kvm);
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store-status,
 * initial PSW/reset, ONE_REG get/set, ucontrol address-space map/
 * unmap and fault-in, and capability enabling.
 * NOTE(review): the switch framing, many break/return lines and error
 * assignments are missing from this chunk.  Also note the "®" glyphs
 * below are mojibake: U+00AE swallowed the "&reg" of "&reg"/"&reg,"
 * in copy_from_user(&reg, ...) and the two one_reg calls.
 */
850 long kvm_arch_vcpu_ioctl(struct file *filp,
851 unsigned int ioctl, unsigned long arg)
853 struct kvm_vcpu *vcpu = filp->private_data;
854 void __user *argp = (void __user *)arg;
858 case KVM_S390_INTERRUPT: {
859 struct kvm_s390_interrupt s390int;
862 if (copy_from_user(&s390int, argp, sizeof(s390int)))
864 r = kvm_s390_inject_vcpu(vcpu, &s390int);
867 case KVM_S390_STORE_STATUS:
868 r = kvm_s390_vcpu_store_status(vcpu, arg);
870 case KVM_S390_SET_INITIAL_PSW: {
874 if (copy_from_user(&psw, argp, sizeof(psw)))
876 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
879 case KVM_S390_INITIAL_RESET:
880 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
882 case KVM_SET_ONE_REG:
883 case KVM_GET_ONE_REG: {
884 struct kvm_one_reg reg;
886 if (copy_from_user(®, argp, sizeof(reg)))
888 if (ioctl == KVM_SET_ONE_REG)
889 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®);
891 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®);
894 #ifdef CONFIG_KVM_S390_UCONTROL
895 case KVM_S390_UCAS_MAP: {
896 struct kvm_s390_ucas_mapping ucasmap;
898 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
/* UCAS map/unmap are only valid for user-controlled VMs */
903 if (!kvm_is_ucontrol(vcpu->kvm)) {
908 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
909 ucasmap.vcpu_addr, ucasmap.length);
912 case KVM_S390_UCAS_UNMAP: {
913 struct kvm_s390_ucas_mapping ucasmap;
915 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
920 if (!kvm_is_ucontrol(vcpu->kvm)) {
925 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
930 case KVM_S390_VCPU_FAULT: {
931 r = gmap_fault(arg, vcpu->arch.gmap);
932 if (!IS_ERR_VALUE(r))
938 struct kvm_enable_cap cap;
940 if (copy_from_user(&cap, argp, sizeof(cap)))
942 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
/*
 * mmap fault handler for the vcpu fd: user-controlled VMs may map the
 * SIE control block page at KVM_S390_SIE_PAGE_OFFSET; everything else
 * gets SIGBUS.
 */
951 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
953 #ifdef CONFIG_KVM_S390_UCONTROL
954 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
955 && (kvm_is_ucontrol(vcpu->kvm))) {
956 vmf->page = virt_to_page(vcpu->arch.sie_block);
961 return VM_FAULT_SIGBUS;
/* Memslot alloc/free hooks: no arch-private memslot data on s390. */
964 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
965 struct kvm_memory_slot *dont)
969 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
974 /* Section: memory related */
/*
 * Validate a new/changed memslot: start address and size must be
 * 1MB-segment aligned; fragmentation of the userspace backing is fine.
 */
975 int kvm_arch_prepare_memory_region(struct kvm *kvm,
976 struct kvm_memory_slot *memslot,
977 struct kvm_userspace_memory_region *mem,
978 enum kvm_mr_change change)
980 /* A few sanity checks. We can have memory slots which have to be
981 located/ended at a segment boundary (1MB). The memory in userland is
982 ok to be fragmented into various different vmas. It is okay to mmap()
983 and munmap() stuff in this slot after doing this call at any time */
985 if (mem->userspace_addr & 0xffffful)
988 if (mem->memory_size & 0xffffful)
/*
 * Commit a memslot change: re-map the affected range in the VM-wide
 * gmap, unless the slot's address/size/backing are unchanged (in which
 * case remapping would only cause spurious translation exceptions).
 */
994 void kvm_arch_commit_memory_region(struct kvm *kvm,
995 struct kvm_userspace_memory_region *mem,
996 const struct kvm_memory_slot *old,
997 enum kvm_mr_change change)
1001 /* If the basics of the memslot do not change, we do not want
1002 * to update the gmap. Every update causes several unnecessary
1003 * segment translation exceptions. This is usually handled just
1004 * fine by the normal fault handler + gmap, but it will also
1005 * cause faults on the prefix page of running guest CPUs.
1007 if (old->userspace_addr == mem->userspace_addr &&
1008 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1009 old->npages * PAGE_SIZE == mem->memory_size)
1012 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1013 mem->guest_phys_addr, mem->memory_size);
1015 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
/* Shadow-page flush hooks: nothing to do on s390. */
1019 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1023 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1024 struct kvm_memory_slot *slot)
/*
 * Module init: register with generic KVM, then build the facility
 * bitmap advertised to guests — a zeroed DMA page seeded from the
 * host's STFLE list and masked down to the facilities KVM actually
 * supports.
 * NOTE(review): error handling around kvm_init/get_zeroed_page and the
 * final return are missing from this chunk.
 */
1028 static int __init kvm_s390_init(void)
1031 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1036 * guests can ask for up to 255+1 double words, we need a full page
1037 * to hold the maximum amount of facilities. On the other hand, we
1038 * only set facilities that are known to work in KVM.
1040 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1045 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
1046 facilities[0] &= 0xff00fff3f47c0000ULL;
1047 facilities[1] &= 0x001c000000000000ULL;
/* Module exit: free the facility bitmap page (kvm_exit is elsewhere). */
1051 static void __exit kvm_s390_exit(void)
1053 free_page((unsigned long) facilities);
1057 module_init(kvm_s390_init);
1058 module_exit(kvm_s390_exit);