2 * hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008, 2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
14 * Jason J. Herne <jjherne@us.ibm.com>
17 #include <linux/compiler.h>
18 #include <linux/err.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/slab.h>
26 #include <linux/timer.h>
27 #include <asm/asm-offsets.h>
28 #include <asm/lowcore.h>
29 #include <asm/pgtable.h>
31 #include <asm/switch_to.h>
32 #include <asm/facility.h>
37 #define CREATE_TRACE_POINTS
39 #include "trace-s390.h"
41 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/*
 * Per-VCPU statistics exported through debugfs: each entry maps a debugfs
 * file name to a counter in struct kvm_vcpu (stat.*) via VCPU_STAT().
 * NOTE(review): the array's closing brace/terminator is not visible in
 * this chunk — confirm a terminating entry exists in the full file.
 */
43 struct kvm_stats_debugfs_item debugfs_entries[] = {
44 { "userspace_handled", VCPU_STAT(exit_userspace) },
45 { "exit_null", VCPU_STAT(exit_null) },
46 { "exit_validity", VCPU_STAT(exit_validity) },
47 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
48 { "exit_external_request", VCPU_STAT(exit_external_request) },
49 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
50 { "exit_instruction", VCPU_STAT(exit_instruction) },
51 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
52 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
53 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
54 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
55 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
56 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
57 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
58 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
59 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
60 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
61 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
62 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
63 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
64 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
65 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
66 { "instruction_spx", VCPU_STAT(instruction_spx) },
67 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
68 { "instruction_stap", VCPU_STAT(instruction_stap) },
69 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
70 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
71 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
72 { "instruction_essa", VCPU_STAT(instruction_essa) },
73 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
74 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
75 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
76 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
77 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
78 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
79 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
80 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
81 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
82 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
83 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
84 { "diagnose_10", VCPU_STAT(diagnose_10) },
85 { "diagnose_44", VCPU_STAT(diagnose_44) },
86 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
90 unsigned long *vfacilities;
91 static struct gmap_notifier gmap_notifier;
93 /* test availability of vfacility */
/*
 * Returns non-zero when virtual facility bit @nr is set in the
 * vfacilities bitmap that is presented to guests (delegates to
 * __test_facility()).
 */
94 int test_vfacility(unsigned long nr)
96 return __test_facility(nr, (void *) vfacilities);
99 /* Section: not file related */
100 int kvm_arch_hardware_enable(void *garbage)
102 /* every s390 is virtualization enabled ;-) */
106 void kvm_arch_hardware_disable(void *garbage)
110 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
112 int kvm_arch_hardware_setup(void)
114 gmap_notifier.notifier_call = kvm_gmap_notifier;
115 gmap_register_ipte_notifier(&gmap_notifier);
119 void kvm_arch_hardware_unsetup(void)
121 gmap_unregister_ipte_notifier(&gmap_notifier);
124 void kvm_arch_check_processor_compat(void *rtn)
128 int kvm_arch_init(void *opaque)
133 void kvm_arch_exit(void)
137 /* Section: device related */
138 long kvm_arch_dev_ioctl(struct file *filp,
139 unsigned int ioctl, unsigned long arg)
141 if (ioctl == KVM_S390_ENABLE_SIE)
142 return s390_enable_sie();
146 int kvm_dev_ioctl_check_extension(long ext)
151 case KVM_CAP_S390_PSW:
152 case KVM_CAP_S390_GMAP:
153 case KVM_CAP_SYNC_MMU:
154 #ifdef CONFIG_KVM_S390_UCONTROL
155 case KVM_CAP_S390_UCONTROL:
157 case KVM_CAP_ASYNC_PF:
158 case KVM_CAP_SYNC_REGS:
159 case KVM_CAP_ONE_REG:
160 case KVM_CAP_ENABLE_CAP:
161 case KVM_CAP_S390_CSS_SUPPORT:
162 case KVM_CAP_IOEVENTFD:
163 case KVM_CAP_DEVICE_CTRL:
164 case KVM_CAP_ENABLE_CAP_VM:
165 case KVM_CAP_VM_ATTRIBUTES:
168 case KVM_CAP_NR_VCPUS:
169 case KVM_CAP_MAX_VCPUS:
172 case KVM_CAP_NR_MEMSLOTS:
173 r = KVM_USER_MEM_SLOTS;
175 case KVM_CAP_S390_COW:
176 r = MACHINE_HAS_ESOP;
184 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
185 struct kvm_memory_slot *memslot)
187 gfn_t cur_gfn, last_gfn;
188 unsigned long address;
189 struct gmap *gmap = kvm->arch.gmap;
191 down_read(&gmap->mm->mmap_sem);
192 /* Loop over all guest pages */
193 last_gfn = memslot->base_gfn + memslot->npages;
194 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
195 address = gfn_to_hva_memslot(memslot, cur_gfn);
197 if (gmap_test_and_clear_dirty(address, gmap))
198 mark_page_dirty(kvm, cur_gfn);
200 up_read(&gmap->mm->mmap_sem);
203 /* Section: vm related */
205 * Get (and clear) the dirty memory log for a memory slot.
/*
 * Syncs gmap dirty state into the slot's dirty bitmap, copies the log to
 * userspace via kvm_get_dirty_log(), then zeroes the bitmap. Serialized
 * against memslot changes by kvm->slots_lock.
 * NOTE(review): the early-return error paths for the bounds check and the
 * missing-bitmap check are not visible in this chunk.
 */
207 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
208 struct kvm_dirty_log *log)
212 struct kvm_memory_slot *memslot;
215 mutex_lock(&kvm->slots_lock);
218 if (log->slot >= KVM_USER_MEM_SLOTS)
221 memslot = id_to_memslot(kvm->memslots, log->slot);
223 if (!memslot->dirty_bitmap)
226 kvm_s390_sync_dirty_log(kvm, memslot);
227 r = kvm_get_dirty_log(kvm, log, &is_dirty);
231 /* Clear the dirty log */
233 n = kvm_dirty_bitmap_bytes(memslot);
234 memset(memslot->dirty_bitmap, 0, n);
238 mutex_unlock(&kvm->slots_lock);
242 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
250 case KVM_CAP_S390_IRQCHIP:
251 kvm->arch.use_irqchip = 1;
261 static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
265 switch (attr->attr) {
266 case KVM_S390_VM_MEM_ENABLE_CMMA:
268 mutex_lock(&kvm->lock);
269 if (atomic_read(&kvm->online_vcpus) == 0) {
270 kvm->arch.use_cmma = 1;
273 mutex_unlock(&kvm->lock);
275 case KVM_S390_VM_MEM_CLR_CMMA:
276 mutex_lock(&kvm->lock);
277 idx = srcu_read_lock(&kvm->srcu);
278 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
279 srcu_read_unlock(&kvm->srcu, idx);
280 mutex_unlock(&kvm->lock);
290 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
294 switch (attr->group) {
295 case KVM_S390_VM_MEM_CTRL:
296 ret = kvm_s390_mem_control(kvm, attr);
306 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
311 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
315 switch (attr->group) {
316 case KVM_S390_VM_MEM_CTRL:
317 switch (attr->attr) {
318 case KVM_S390_VM_MEM_ENABLE_CMMA:
319 case KVM_S390_VM_MEM_CLR_CMMA:
335 long kvm_arch_vm_ioctl(struct file *filp,
336 unsigned int ioctl, unsigned long arg)
338 struct kvm *kvm = filp->private_data;
339 void __user *argp = (void __user *)arg;
340 struct kvm_device_attr attr;
344 case KVM_S390_INTERRUPT: {
345 struct kvm_s390_interrupt s390int;
348 if (copy_from_user(&s390int, argp, sizeof(s390int)))
350 r = kvm_s390_inject_vm(kvm, &s390int);
353 case KVM_ENABLE_CAP: {
354 struct kvm_enable_cap cap;
356 if (copy_from_user(&cap, argp, sizeof(cap)))
358 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
361 case KVM_CREATE_IRQCHIP: {
362 struct kvm_irq_routing_entry routing;
365 if (kvm->arch.use_irqchip) {
366 /* Set up dummy routing. */
367 memset(&routing, 0, sizeof(routing));
368 kvm_set_irq_routing(kvm, &routing, 0, 0);
373 case KVM_SET_DEVICE_ATTR: {
375 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
377 r = kvm_s390_vm_set_attr(kvm, &attr);
380 case KVM_GET_DEVICE_ATTR: {
382 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
384 r = kvm_s390_vm_get_attr(kvm, &attr);
387 case KVM_HAS_DEVICE_ATTR: {
389 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
391 r = kvm_s390_vm_has_attr(kvm, &attr);
/*
 * Architecture-specific VM creation: validates the UCONTROL type bits,
 * enables SIE for the host mm, allocates the SCA (system control area)
 * and the s390 debug feature buffer, initializes the floating interrupt
 * list, and sets up the guest address space (gmap) for non-ucontrol VMs.
 */
401 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
405 static unsigned long sca_offset;
408 #ifdef CONFIG_KVM_S390_UCONTROL
409 if (type & ~KVM_VM_S390_UCONTROL)
411 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
418 rc = s390_enable_sie();
424 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL)#
427 spin_lock(&kvm_lock);
/* Stagger SCA placement within the page between VMs; offset cycles
 * through 0x000..0x7f0 in 16-byte steps under kvm_lock. */
428 sca_offset = (sca_offset + 16) & 0x7f0;
429 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
430 spin_unlock(&kvm_lock);
432 sprintf(debug_name, "kvm-%u", current->pid);
434 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
438 spin_lock_init(&kvm->arch.float_int.lock);
439 INIT_LIST_HEAD(&kvm->arch.float_int.list);
441 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
442 VM_EVENT(kvm, 3, "%s", "vm created");
444 if (type & KVM_VM_S390_UCONTROL) {
/* ucontrol VMs manage their own gmap per vcpu (see kvm_arch_vcpu_init) */
445 kvm->arch.gmap = NULL;
447 kvm->arch.gmap = gmap_alloc(current->mm);
450 kvm->arch.gmap->private = kvm;
451 kvm->arch.gmap->pfault_enabled = 0;
454 kvm->arch.css_support = 0;
455 kvm->arch.use_irqchip = 0;
/* error unwind: free debug buffer and SCA page */
459 debug_unregister(kvm->arch.dbf);
461 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: drop its SCA registration (non-ucontrol), free its
 * per-vcpu gmap (ucontrol), release CMMA state and the SIE control block,
 * then hand the vcpu back to the generic layer / slab cache.
 */
466 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
468 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
469 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
470 kvm_clear_async_pf_completion_queue(vcpu);
471 if (!kvm_is_ucontrol(vcpu->kvm)) {
/* clear this cpu's bit in the SCA cpu mask (mirrors set_bit in
 * kvm_arch_vcpu_create) and detach its SIE block from the SCA entry */
472 clear_bit(63 - vcpu->vcpu_id,
473 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
474 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
475 (__u64) vcpu->arch.sie_block)
476 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
480 if (kvm_is_ucontrol(vcpu->kvm))
481 gmap_free(vcpu->arch.gmap);
483 if (kvm_s390_cmma_enabled(vcpu->kvm))
484 kvm_s390_vcpu_unsetup_cmma(vcpu);
485 free_page((unsigned long)(vcpu->arch.sie_block));
487 kvm_vcpu_uninit(vcpu);
488 kmem_cache_free(kvm_vcpu_cache, vcpu);
491 static void kvm_free_vcpus(struct kvm *kvm)
494 struct kvm_vcpu *vcpu;
496 kvm_for_each_vcpu(i, vcpu, kvm)
497 kvm_arch_vcpu_destroy(vcpu);
499 mutex_lock(&kvm->lock);
500 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
501 kvm->vcpus[i] = NULL;
503 atomic_set(&kvm->online_vcpus, 0);
504 mutex_unlock(&kvm->lock);
507 void kvm_arch_sync_events(struct kvm *kvm)
511 void kvm_arch_destroy_vm(struct kvm *kvm)
514 free_page((unsigned long)(kvm->arch.sca));
515 debug_unregister(kvm->arch.dbf);
516 if (!kvm_is_ucontrol(kvm))
517 gmap_free(kvm->arch.gmap);
518 kvm_s390_destroy_adapters(kvm);
521 /* Section: vcpu related */
522 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
524 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
525 kvm_clear_async_pf_completion_queue(vcpu);
526 if (kvm_is_ucontrol(vcpu->kvm)) {
527 vcpu->arch.gmap = gmap_alloc(current->mm);
528 if (!vcpu->arch.gmap)
530 vcpu->arch.gmap->private = vcpu->kvm;
534 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
535 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
542 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
547 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
549 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
550 save_fp_regs(vcpu->arch.host_fpregs.fprs);
551 save_access_regs(vcpu->arch.host_acrs);
552 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
553 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
554 restore_access_regs(vcpu->run->s.regs.acrs);
555 gmap_enable(vcpu->arch.gmap);
556 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
559 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
561 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
562 gmap_disable(vcpu->arch.gmap);
563 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
564 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
565 save_access_regs(vcpu->run->s.regs.acrs);
566 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
567 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
568 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Bring the vcpu to initial-CPU-reset state: clear PSW, prefix, timers,
 * control registers and pending async-pf state, and mark the cpu stopped.
 */
571 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
573 /* this equals initial cpu reset in pop, but we don't switch to ESA */
574 vcpu->arch.sie_block->gpsw.mask = 0UL;
575 vcpu->arch.sie_block->gpsw.addr = 0UL;
576 kvm_s390_set_prefix(vcpu, 0);
577 vcpu->arch.sie_block->cputm = 0UL;
578 vcpu->arch.sie_block->ckc = 0UL;
579 vcpu->arch.sie_block->todpr = 0;
580 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
/* architected reset values for CR0/CR14 — presumably the POP-defined
 * initial control register contents; verify against Principles of
 * Operation */
581 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
582 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
583 vcpu->arch.guest_fpregs.fpc = 0;
/* load the (zeroed) FP control register into hardware */
584 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
585 vcpu->arch.sie_block->gbea = 1;
586 vcpu->arch.sie_block->pp = 0;
587 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
588 kvm_clear_async_pf_completion_queue(vcpu);
589 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
590 kvm_s390_clear_local_irqs(vcpu);
593 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
598 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
600 free_page(vcpu->arch.sie_block->cbrlo);
601 vcpu->arch.sie_block->cbrlo = 0;
604 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
606 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
607 if (!vcpu->arch.sie_block->cbrlo)
610 vcpu->arch.sie_block->ecb2 |= 0x80;
611 vcpu->arch.sie_block->ecb2 &= ~0x08;
/*
 * Final vcpu setup after creation: program the SIE control block's
 * execution-control (ecb/ecb2/eca) bits and facility list, enable
 * storage-key instruction interception, set up CMMA if the VM uses it,
 * and initialize the clock-comparator timer and wakeup tasklet.
 */
615 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
619 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
623 vcpu->arch.sie_block->ecb = 6;
/* ecb bit 0x10 gated on facilities 50 and 73 — presumably
 * transactional-execution support; confirm against facility list docs */
624 if (test_vfacility(50) && test_vfacility(73))
625 vcpu->arch.sie_block->ecb |= 0x10;
627 vcpu->arch.sie_block->ecb2 = 8;
628 vcpu->arch.sie_block->eca = 0xC1002000U;
630 vcpu->arch.sie_block->eca |= 1;
631 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
/* intercept the storage-key instructions ISKE/SSKE/RRBE */
632 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
633 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
634 rc = kvm_s390_vcpu_setup_cmma(vcpu);
638 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
639 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
640 (unsigned long) vcpu);
641 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
642 get_cpu_id(&vcpu->arch.cpu_id);
643 vcpu->arch.cpu_id.version = 0xff;
/*
 * Allocate and wire up a new vcpu: slab-allocate the kvm_vcpu, get a
 * zeroed page for the SIE control block (sie_page), register the vcpu in
 * the SCA (non-ucontrol), initialize the local interrupt structures and
 * hand the vcpu to the generic kvm_vcpu_init(). Error paths unwind the
 * SIE page and slab object.
 */
647 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
650 struct kvm_vcpu *vcpu;
651 struct sie_page *sie_page;
654 if (id >= KVM_MAX_VCPUS)
659 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
663 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
667 vcpu->arch.sie_block = &sie_page->sie_block;
668 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
670 vcpu->arch.sie_block->icpua = id;
671 if (!kvm_is_ucontrol(kvm)) {
672 if (!kvm->arch.sca) {
/* publish this vcpu's SIE block in the SCA and flag it in the cpu mask
 * (undone in kvm_arch_vcpu_destroy) */
676 if (!kvm->arch.sca->cpu[id].sda)
677 kvm->arch.sca->cpu[id].sda =
678 (__u64) vcpu->arch.sie_block;
679 vcpu->arch.sie_block->scaoh =
680 (__u32)(((__u64)kvm->arch.sca) >> 32);
681 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
682 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
685 spin_lock_init(&vcpu->arch.local_int.lock);
686 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
687 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
688 vcpu->arch.local_int.wq = &vcpu->wq;
689 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
691 rc = kvm_vcpu_init(vcpu, kvm, id);
693 goto out_free_sie_block;
694 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
695 vcpu->arch.sie_block);
696 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
/* error unwind */
700 free_page((unsigned long)(vcpu->arch.sie_block));
702 kmem_cache_free(kvm_vcpu_cache, vcpu);
707 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
709 return kvm_cpu_has_interrupt(vcpu);
712 void s390_vcpu_block(struct kvm_vcpu *vcpu)
714 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
717 void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
719 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
723 * Kick a guest cpu out of SIE and wait until SIE is not running.
724 * If the CPU is not running (e.g. waiting as idle) the function will
725 * return immediately. */
726 void exit_sie(struct kvm_vcpu *vcpu)
728 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
729 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
733 /* Kick a guest cpu out of SIE and prevent SIE-reentry */
734 void exit_sie_sync(struct kvm_vcpu *vcpu)
736 s390_vcpu_block(vcpu);
740 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
743 struct kvm *kvm = gmap->private;
744 struct kvm_vcpu *vcpu;
746 kvm_for_each_vcpu(i, vcpu, kvm) {
747 /* match against both prefix pages */
748 if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
749 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
750 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
756 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
758 /* kvm common code refers to this, but never calls it */
763 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
764 struct kvm_one_reg *reg)
769 case KVM_REG_S390_TODPR:
770 r = put_user(vcpu->arch.sie_block->todpr,
771 (u32 __user *)reg->addr);
773 case KVM_REG_S390_EPOCHDIFF:
774 r = put_user(vcpu->arch.sie_block->epoch,
775 (u64 __user *)reg->addr);
777 case KVM_REG_S390_CPU_TIMER:
778 r = put_user(vcpu->arch.sie_block->cputm,
779 (u64 __user *)reg->addr);
781 case KVM_REG_S390_CLOCK_COMP:
782 r = put_user(vcpu->arch.sie_block->ckc,
783 (u64 __user *)reg->addr);
785 case KVM_REG_S390_PFTOKEN:
786 r = put_user(vcpu->arch.pfault_token,
787 (u64 __user *)reg->addr);
789 case KVM_REG_S390_PFCOMPARE:
790 r = put_user(vcpu->arch.pfault_compare,
791 (u64 __user *)reg->addr);
793 case KVM_REG_S390_PFSELECT:
794 r = put_user(vcpu->arch.pfault_select,
795 (u64 __user *)reg->addr);
797 case KVM_REG_S390_PP:
798 r = put_user(vcpu->arch.sie_block->pp,
799 (u64 __user *)reg->addr);
801 case KVM_REG_S390_GBEA:
802 r = put_user(vcpu->arch.sie_block->gbea,
803 (u64 __user *)reg->addr);
812 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
813 struct kvm_one_reg *reg)
818 case KVM_REG_S390_TODPR:
819 r = get_user(vcpu->arch.sie_block->todpr,
820 (u32 __user *)reg->addr);
822 case KVM_REG_S390_EPOCHDIFF:
823 r = get_user(vcpu->arch.sie_block->epoch,
824 (u64 __user *)reg->addr);
826 case KVM_REG_S390_CPU_TIMER:
827 r = get_user(vcpu->arch.sie_block->cputm,
828 (u64 __user *)reg->addr);
830 case KVM_REG_S390_CLOCK_COMP:
831 r = get_user(vcpu->arch.sie_block->ckc,
832 (u64 __user *)reg->addr);
834 case KVM_REG_S390_PFTOKEN:
835 r = get_user(vcpu->arch.pfault_token,
836 (u64 __user *)reg->addr);
838 case KVM_REG_S390_PFCOMPARE:
839 r = get_user(vcpu->arch.pfault_compare,
840 (u64 __user *)reg->addr);
842 case KVM_REG_S390_PFSELECT:
843 r = get_user(vcpu->arch.pfault_select,
844 (u64 __user *)reg->addr);
846 case KVM_REG_S390_PP:
847 r = get_user(vcpu->arch.sie_block->pp,
848 (u64 __user *)reg->addr);
850 case KVM_REG_S390_GBEA:
851 r = get_user(vcpu->arch.sie_block->gbea,
852 (u64 __user *)reg->addr);
861 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
863 kvm_s390_vcpu_initial_reset(vcpu);
/*
 * Copy the guest general purpose registers supplied by userspace into
 * the sync-regs area of the kvm_run structure. Always succeeds.
 *
 * Fix: restored "&regs" — the source had been corrupted by HTML-entity
 * decoding ("&reg" rendered as the registered-trademark sign).
 */
867 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
869 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
/*
 * Copy the guest general purpose registers out of the kvm_run sync-regs
 * area into the userspace-provided structure. Always succeeds.
 *
 * Fix: restored "&regs" — the source had been corrupted by HTML-entity
 * decoding ("&reg" rendered as the registered-trademark sign).
 */
873 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
875 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
879 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
880 struct kvm_sregs *sregs)
882 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
883 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
884 restore_access_regs(vcpu->run->s.regs.acrs);
888 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
889 struct kvm_sregs *sregs)
891 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
892 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
896 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
898 if (test_fp_ctl(fpu->fpc))
900 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
901 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
902 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
903 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
907 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
909 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
910 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
914 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
918 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
921 vcpu->run->psw_mask = psw.mask;
922 vcpu->run->psw_addr = psw.addr;
927 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
928 struct kvm_translation *tr)
930 return -EINVAL; /* not implemented yet */
933 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
934 struct kvm_guest_debug *dbg)
936 return -EINVAL; /* not implemented yet */
939 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
940 struct kvm_mp_state *mp_state)
942 return -EINVAL; /* not implemented yet */
945 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
946 struct kvm_mp_state *mp_state)
948 return -EINVAL; /* not implemented yet */
951 bool kvm_s390_cmma_enabled(struct kvm *kvm)
953 if (!MACHINE_IS_LPAR)
955 /* only enable for z10 and later */
956 if (!MACHINE_HAS_EDAT1)
958 if (!kvm->arch.use_cmma)
963 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
966 * We use MMU_RELOAD just to re-arm the ipte notifier for the
967 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
968 * This ensures that the ipte instruction for this request has
969 * already finished. We might race against a second unmapper that
970 * wants to set the blocking bit. Lets just retry the request loop.
972 while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
974 rc = gmap_ipte_notify(vcpu->arch.gmap,
975 vcpu->arch.sie_block->prefix,
979 s390_vcpu_unblock(vcpu);
984 static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
987 hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
988 struct mm_struct *mm = current->mm;
989 down_read(&mm->mmap_sem);
990 rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
991 up_read(&mm->mmap_sem);
995 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
998 struct kvm_s390_interrupt inti;
1002 inti.type = KVM_S390_INT_PFAULT_INIT;
1003 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
1005 inti.type = KVM_S390_INT_PFAULT_DONE;
1006 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1010 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1011 struct kvm_async_pf *work)
1013 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1014 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1017 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1018 struct kvm_async_pf *work)
1020 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1021 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1024 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1025 struct kvm_async_pf *work)
1027 /* s390 will always inject the page directly */
1030 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1033 * s390 will always inject the page directly,
1034 * but we still want check_async_completion to cleanup
1039 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1042 struct kvm_arch_async_pf arch;
1045 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1047 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1048 vcpu->arch.pfault_compare)
1050 if (psw_extint_disabled(vcpu))
1052 if (kvm_cpu_has_interrupt(vcpu))
1054 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1056 if (!vcpu->arch.gmap->pfault_enabled)
1059 hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
1060 if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
1063 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1067 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1072 * On s390 notifications for arriving pages will be delivered directly
1073 * to the guest but the house keeping for completed pfaults is
1074 * handled outside the worker.
1076 kvm_check_async_pf_completion(vcpu);
1078 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
1083 if (test_thread_flag(TIF_MCCK_PENDING))
1086 if (!kvm_is_ucontrol(vcpu->kvm))
1087 kvm_s390_deliver_pending_interrupts(vcpu);
1089 rc = kvm_s390_handle_requests(vcpu);
1093 vcpu->arch.sie_block->icptcode = 0;
1094 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1095 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1096 trace_kvm_s390_sie_enter(vcpu, cpuflags);
/*
 * Handle the result of one SIE entry (@exit_reason is sie64a()'s return
 * value): on a host fault during SIE, either report it to a ucontrol
 * monitor, resolve a guest pfault (async pf or synchronous fault-in), or
 * inject an addressing exception; otherwise dispatch the intercept.
 */
1101 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1105 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1106 vcpu->arch.sie_block->icptcode);
1107 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
/* exit_reason >= 0: normal SIE exit (intercept) */
1109 if (exit_reason >= 0) {
1111 } else if (kvm_is_ucontrol(vcpu->kvm)) {
/* host fault under ucontrol: let userspace resolve the translation */
1112 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1113 vcpu->run->s390_ucontrol.trans_exc_code =
1114 current->thread.gmap_addr;
1115 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1118 } else if (current->thread.gmap_pfault) {
/* guest page fault: try async pf first, fall back to synchronous
 * fault-in of the page */
1119 trace_kvm_s390_major_guest_pfault(vcpu);
1120 current->thread.gmap_pfault = 0;
1121 if (kvm_arch_setup_async_pf(vcpu) ||
1122 (kvm_arch_fault_in_sync(vcpu) >= 0))
/* unresolvable fault in SIE: inject an addressing exception */
1127 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1128 trace_kvm_s390_sie_fault(vcpu);
1129 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1132 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
1135 if (kvm_is_ucontrol(vcpu->kvm))
1136 /* Don't exit for host interrupts. */
1137 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
1139 rc = kvm_handle_sie_intercept(vcpu);
/*
 * Main vcpu execution loop: repeatedly prepare the vcpu (vcpu_pre_run),
 * enter the guest via sie64a() with SRCU dropped, then process the exit
 * (vcpu_post_run). Loops until a handler reports an error/userspace-exit
 * or a signal is pending.
 */
1145 static int __vcpu_run(struct kvm_vcpu *vcpu)
1147 int rc, exit_reason;
1150 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1151 * ning the guest), so that memslots (and other stuff) are protected
1153 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1156 rc = vcpu_pre_run(vcpu);
/* drop SRCU across guest execution */
1160 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1162 * As PF_VCPU will be used in fault handler, between
1163 * guest_enter and guest_exit should be no uaccess.
1168 exit_reason = sie64a(vcpu->arch.sie_block,
1169 vcpu->run->s.regs.gprs);
1171 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1173 rc = vcpu_post_run(vcpu, exit_reason);
1174 } while (!signal_pending(current) && !rc);
1176 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
1180 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1185 if (vcpu->sigset_active)
1186 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1188 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
1190 switch (kvm_run->exit_reason) {
1191 case KVM_EXIT_S390_SIEIC:
1192 case KVM_EXIT_UNKNOWN:
1194 case KVM_EXIT_S390_RESET:
1195 case KVM_EXIT_S390_UCONTROL:
1196 case KVM_EXIT_S390_TSCH:
1202 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1203 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1204 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
1205 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
1206 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1208 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1209 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
1210 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
1211 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1215 rc = __vcpu_run(vcpu);
1217 if (signal_pending(current) && !rc) {
1218 kvm_run->exit_reason = KVM_EXIT_INTR;
1222 if (rc == -EOPNOTSUPP) {
1223 /* intercept cannot be handled in-kernel, prepare kvm-run */
1224 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1225 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
1226 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1227 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1231 if (rc == -EREMOTE) {
1232 /* intercept was handled, but userspace support is needed
1233 * kvm_run has been prepared by the handler */
1237 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1238 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1239 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
1240 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1242 if (vcpu->sigset_active)
1243 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1245 vcpu->stat.exit_userspace++;
1249 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
1250 unsigned long n, int prefix)
1253 return copy_to_guest(vcpu, guestdest, from, n);
1255 return copy_to_guest_absolute(vcpu, guestdest, from, n);
1259 * store status at address
1260 * we use have two special cases:
1261 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1262 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/*
 * Store the vcpu's architected status (FPRs, GPRs, PSW, prefix, FPC,
 * TOD programmable reg, CPU timer, clock comparator, access and control
 * registers) into the guest save area at @addr. The two sentinel values
 * NOADDR / PREFIXED first store the archmode byte at absolute/real 163
 * and redirect @addr to SAVE_AREA_BASE (see the comment block above this
 * function in the full file).
 */
1264 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
1266 unsigned char archmode = 1;
1270 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
1271 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
1273 addr = SAVE_AREA_BASE;
1275 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
1276 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
1278 addr = SAVE_AREA_BASE;
1283 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
1284 vcpu->arch.guest_fpregs.fprs, 128, prefix))
1287 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
1288 vcpu->run->s.regs.gprs, 128, prefix))
1291 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
1292 &vcpu->arch.sie_block->gpsw, 16, prefix))
1295 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
1296 &vcpu->arch.sie_block->prefix, 4, prefix))
1299 if (__guestcopy(vcpu,
1300 addr + offsetof(struct save_area, fp_ctrl_reg),
1301 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
1304 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
1305 &vcpu->arch.sie_block->todpr, 4, prefix))
1308 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
1309 &vcpu->arch.sie_block->cputm, 8, prefix))
/* clock comparator is stored shifted right by 8 per the save-area
 * format — presumably matching the STORE STATUS definition; verify
 * against Principles of Operation */
1312 clkcomp = vcpu->arch.sie_block->ckc >> 8;
1313 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
1314 &clkcomp, 8, prefix))
1317 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
1318 &vcpu->run->s.regs.acrs, 64, prefix))
1321 if (__guestcopy(vcpu,
1322 addr + offsetof(struct save_area, ctrl_regs),
1323 &vcpu->arch.sie_block->gcr, 128, prefix))
1328 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1331 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1332 * copying in vcpu load/put. Lets update our copies before we save
1333 * it into the save area
1335 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1336 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1337 save_access_regs(vcpu->run->s.regs.acrs);
1339 return kvm_s390_store_status_unloaded(vcpu, addr);
/*
 * Handle KVM_ENABLE_CAP for a vcpu.  Only the KVM_CAP_S390_CSS_SUPPORT
 * case is visible here: it enables the VM-wide css_support flag exactly
 * once and emits a trace event.
 * NOTE(review): other cases, the default branch and the return statement
 * are elided from this view.
 */
1342 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1343 struct kvm_enable_cap *cap)
1351 case KVM_CAP_S390_CSS_SUPPORT:
/* flag is set only on the first enable; repeated enables are no-ops */
1352 if (!vcpu->kvm->arch.css_support) {
1353 vcpu->kvm->arch.css_support = 1;
1354 trace_kvm_s390_enable_css(vcpu->kvm);
/*
 * Dispatcher for the s390 vcpu ioctls.  @arg is either a plain value
 * (e.g. a guest address) or a userspace pointer, depending on the ioctl.
 * NOTE(review): many lines (error assignments, break statements, the
 * enclosing switch and the closing brace) are elided from this view.
 */
1365 long kvm_arch_vcpu_ioctl(struct file *filp,
1366 unsigned int ioctl, unsigned long arg)
1368 struct kvm_vcpu *vcpu = filp->private_data;
1369 void __user *argp = (void __user *)arg;
/* inject an interrupt described by userspace into this vcpu */
1374 case KVM_S390_INTERRUPT: {
1375 struct kvm_s390_interrupt s390int;
1378 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1380 r = kvm_s390_inject_vcpu(vcpu, &s390int);
/* storing status touches guest memory: hold the memslot SRCU read lock */
1383 case KVM_S390_STORE_STATUS:
1384 idx = srcu_read_lock(&vcpu->kvm->srcu);
1385 r = kvm_s390_vcpu_store_status(vcpu, arg);
1386 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1388 case KVM_S390_SET_INITIAL_PSW: {
1392 if (copy_from_user(&psw, argp, sizeof(psw)))
1394 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1397 case KVM_S390_INITIAL_RESET:
1398 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
/* single-register access: SET and GET share the copy-in of the request */
1400 case KVM_SET_ONE_REG:
1401 case KVM_GET_ONE_REG: {
1402 struct kvm_one_reg reg;
1404 if (copy_from_user(&reg, argp, sizeof(reg)))
1406 if (ioctl == KVM_SET_ONE_REG)
1407 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1409 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
/* user-controlled address space mappings, ucontrol guests only */
1412 #ifdef CONFIG_KVM_S390_UCONTROL
1413 case KVM_S390_UCAS_MAP: {
1414 struct kvm_s390_ucas_mapping ucasmap;
1416 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1421 if (!kvm_is_ucontrol(vcpu->kvm)) {
1426 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1427 ucasmap.vcpu_addr, ucasmap.length);
1430 case KVM_S390_UCAS_UNMAP: {
1431 struct kvm_s390_ucas_mapping ucasmap;
1433 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1438 if (!kvm_is_ucontrol(vcpu->kvm)) {
1443 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
/* resolve a guest-mapping fault for the address passed in @arg */
1448 case KVM_S390_VCPU_FAULT: {
1449 r = gmap_fault(arg, vcpu->arch.gmap);
1450 if (!IS_ERR_VALUE(r))
1454 case KVM_ENABLE_CAP:
1456 struct kvm_enable_cap cap;
1458 if (copy_from_user(&cap, argp, sizeof(cap)))
1460 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
/*
 * mmap fault handler for the vcpu file descriptor.  Under
 * CONFIG_KVM_S390_UCONTROL, ucontrol guests may map the SIE control
 * block page at KVM_S390_SIE_PAGE_OFFSET; everything else gets SIGBUS.
 * NOTE(review): the success return path and #endif are elided from view.
 */
1469 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1471 #ifdef CONFIG_KVM_S390_UCONTROL
1472 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1473 && (kvm_is_ucontrol(vcpu->kvm))) {
/* hand out the SIE block page with an extra reference for the mapping */
1474 vmf->page = virt_to_page(vcpu->arch.sie_block);
1475 get_page(vmf->page);
1479 return VM_FAULT_SIGBUS;
/*
 * Arch hook for freeing memslot state.
 * NOTE(review): the body is elided from this view — presumably empty
 * (no arch-specific memslot state on s390); verify against the file.
 */
1482 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1483 struct kvm_memory_slot *dont)
/*
 * Arch hook for allocating per-memslot state.
 * NOTE(review): the body is elided from this view — presumably just
 * returns 0 (nothing to allocate on s390); verify against the file.
 */
1487 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1488 unsigned long npages)
/*
 * Arch notification that the memslot array changed.
 * NOTE(review): body elided from this view — presumably empty on s390.
 */
1493 void kvm_arch_memslots_updated(struct kvm *kvm)
1497 /* Section: memory related */
/*
 * Validate a userspace memory region before it is installed: both the
 * userspace address and the region size must be aligned to the 1MB
 * segment boundary (low 20 bits clear).
 * NOTE(review): the return statements are elided from this view.
 */
1498 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1499 struct kvm_memory_slot *memslot,
1500 struct kvm_userspace_memory_region *mem,
1501 enum kvm_mr_change change)
1503 /* A few sanity checks. We can have memory slots which have to be
1504 located/ended at a segment boundary (1MB). The memory in userland is
1505 ok to be fragmented into various different vmas. It is okay to mmap()
1506 and munmap() stuff in this slot after doing this call at any time */
/* userspace address must be 1MB aligned */
1508 if (mem->userspace_addr & 0xffffful)
/* region size must be a multiple of 1MB */
1511 if (mem->memory_size & 0xffffful)
/*
 * Commit a memslot change by (re)mapping the region into the guest
 * address space via the gmap.  Unchanged slots are skipped to avoid
 * needless segment translation exceptions (see comment below).
 * NOTE(review): surrounding braces/returns are elided from this view.
 */
1517 void kvm_arch_commit_memory_region(struct kvm *kvm,
1518 struct kvm_userspace_memory_region *mem,
1519 const struct kvm_memory_slot *old,
1520 enum kvm_mr_change change)
1524 /* If the basics of the memslot do not change, we do not want
1525 * to update the gmap. Every update causes several unnecessary
1526 * segment translation exceptions. This is usually handled just
1527 * fine by the normal fault handler + gmap, but it will also
1528 * cause faults on the prefix page of running guest CPUs.
/* same userspace address, guest address and size: nothing to remap */
1530 if (old->userspace_addr == mem->userspace_addr &&
1531 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1532 old->npages * PAGE_SIZE == mem->memory_size)
1535 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1536 mem->guest_phys_addr, mem->memory_size);
/* commit cannot fail upwards (void hook): only warn on mapping failure */
1538 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
/*
 * Arch hook to flush all shadow mappings.
 * NOTE(review): body elided from this view — presumably empty on s390.
 */
1542 void kvm_arch_flush_shadow_all(struct kvm *kvm)
/*
 * Arch hook to flush shadow mappings for one memslot.
 * NOTE(review): body elided from this view — presumably empty on s390.
 */
1546 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1547 struct kvm_memory_slot *slot)
/*
 * Module init: register with the generic KVM core, then build the
 * facility list advertised to guests.
 * NOTE(review): error-handling lines and returns are elided from view.
 */
1551 static int __init kvm_s390_init(void)
1554 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1559 * guests can ask for up to 255+1 double words, we need a full page
1560 * to hold the maximum amount of facilities. On the other hand, we
1561 * only set facilities that are known to work in KVM.
/* GFP_DMA: presumably the facility page must be below 2GB — TODO confirm */
1563 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
/* copy the first 16 bytes (128 facility bits) of the host STFLE list */
1568 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
/* mask down to the facility bits known to work under KVM (see above) */
1569 vfacilities[0] &= 0xff82fff3f4fc2000UL;
1570 vfacilities[1] &= 0x005c000000000000UL;
/*
 * Module exit: release the facility-list page allocated in
 * kvm_s390_init().
 * NOTE(review): the remainder of the body is elided from this view.
 */
1574 static void __exit kvm_s390_exit(void)
1576 free_page((unsigned long) vfacilities);
/* module entry/exit registration */
1580 module_init(kvm_s390_init);
1581 module_exit(kvm_s390_exit);
/* NOTE(review): the delimiters of the following comment block are elided */
1584 * Enable autoloading of the kvm module.
1585 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1586 * since x86 takes a different approach.
1588 #include <linux/miscdevice.h>
1589 MODULE_ALIAS_MISCDEV(KVM_MINOR);
1590 MODULE_ALIAS("devname:kvm");