/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_VM_ATTRIBUTES:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}
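/*
 * Propagate dirty state from the gmap page tables into the memslot's
 * dirty bitmap, so that a subsequent KVM_GET_DIRTY_LOG sees every page
 * the guest has touched.
 */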
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages (last_gfn is the first gfn past the slot) */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
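/* Handle VM-wide capabilities enabled by userspace via KVM_ENABLE_CAP. */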
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_mem_control(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
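/*
 * Create the VM: allocate the SCA (system control area) at a staggered
 * offset, register a debug feature, and set up the guest address space
 * (gmap) unless this is a user-controlled VM.
 */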
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing todo */
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        kvm_s390_clear_local_irqs(vcpu);
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}
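/*
 * Allocate the collaborative-memory-management block (cbrlo) and flag
 * CMMA handling in the SIE control block (ecb2).
 */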
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb  = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca  = 0xC1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        vcpu->arch.sie_block->fac  = (int) (long) vfacilities;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return rc;
}
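/*
 * Allocate a new VCPU and wire it up: SIE control block, SCA entry and
 * local interrupt state.
 */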
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}
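/*
 * Called back by the ipte notifier when a guest mapping changes; kick
 * every VCPU whose prefix pages are affected out of SIE and make it
 * revalidate them via KVM_REQ_MMU_RELOAD.
 */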
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
                              KVM_GUESTDBG_USE_HW_BP | \
                              KVM_GUESTDBG_ENABLE)
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int rc = 0;

        vcpu->guest_debug = 0;
        kvm_s390_clear_bp_data(vcpu);

        if (dbg->control & ~VALID_GUESTDBG_FLAGS)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;
                /* enforce guest PER */
                atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

                if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
                        rc = kvm_s390_import_bp_data(vcpu, dbg);
        } else {
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.guestdbg.last_bp = 0;
        }

        if (rc) {
                vcpu->guest_debug = 0;
                kvm_s390_clear_bp_data(vcpu);
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
        }

        return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}
bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
        if (!MACHINE_IS_LPAR)
                return false;
        /* only enable for z10 and later */
        if (!MACHINE_HAS_EDAT1)
                return false;
        if (!kvm->arch.use_cmma)
                return false;
        return true;
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Lets just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
        long rc;
        hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
        struct mm_struct *mm = current->mm;

        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        return rc;
}
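/*
 * Inject the pfault "init" interrupt into the VCPU when a fault is first
 * seen, or the pfault "done" interrupt into the VM once it is resolved.
 */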
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;

        inti.parm64 = token;
        if (start_token) {
                inti.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to cleanup
         */
        return true;
}
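/*
 * Decide whether a major fault may be handled asynchronously via the
 * pfault mechanism: the token must be valid, the PSW must match the
 * programmed select/compare values, and external interrupts (including
 * the 0x200 subclass mask in CR0) must be enabled.
 */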
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}
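/* Per-round preparation before (re)entering SIE. */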
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the house keeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}
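/*
 * Post-SIE processing: either handle the intercept in-kernel, hand it to
 * userspace, or turn a guest page fault into an async pfault or a
 * synchronous fault-in.
 */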
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu) ||
                    (kvm_arch_fault_in_sync(vcpu) >= 0))
                        rc = 0;
        }

        if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}
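/* Main userspace entry point for running a VCPU (KVM_RUN). */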
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (guestdbg_exit_pending(vcpu)) {
                kvm_s390_prepare_debug_exit(vcpu);
                return 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
        case KVM_EXIT_DEBUG:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (guestdbg_exit_pending(vcpu) && !rc) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask      = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr      = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        u64 clkcomp;
        int rc;

        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = SAVE_AREA_BASE;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
        }
        rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
                             vcpu->arch.guest_fpregs.fprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
                              &vcpu->arch.sie_block->gpsw, 16);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
                              &vcpu->arch.sie_block->prefix, 4);
        rc |= write_guest_abs(vcpu,
                              gpa + offsetof(struct save_area, fp_ctrl_reg),
                              &vcpu->arch.guest_fpregs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
                              &vcpu->arch.sie_block->cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Lets update our copies before we save
         * it into the save area
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. Memory slots have to start and end on a
           segment boundary (1MB). The memory in userland may be fragmented
           into multiple vmas. It is okay to mmap() and munmap() stuff in
           this slot at any time after this call. */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}
static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * Guests can ask for up to 255+1 double words, so we need a full
         * page to hold the maximum amount of facilities. On the other hand,
         * we only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f4fc2000UL;
        vfacilities[1] &= 0x005c000000000000UL;
        return 0;
}
static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");