/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

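/*
 * VCPU_STAT() expands to the offset of a counter within struct kvm_vcpu
 * plus the KVM_STAT_VCPU type tag, which is exactly the initializer pair
 * that struct kvm_stats_debugfs_item expects below.  The generic KVM
 * debugfs code uses the offset to read the counter out of every vcpu.
 */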
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

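/*
 * hardware_setup/unsetup run once at module load/unload.  All they do on
 * s390 is (un)register kvm_gmap_notifier() as an IPTE notifier, so that
 * invalidation of a guest prefix page can kick the affected vcpu out of
 * SIE (see kvm_gmap_notifier() further down).
 */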
int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty log tracking is not implemented on s390 at this point, so this
 * stub simply reports success.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

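/*
 * Create the per-VM state: validate the vm type flags, enable SIE for the
 * host process, allocate the system control area (SCA) through which SIE
 * addresses the vcpus, register the s390 debug feature, initialize the
 * floating interrupt list and, for everything but user-controlled VMs,
 * allocate the guest address space (gmap).
 */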
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

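/*
 * Tear down a single vcpu: detach its SIE block from the SCA (unless this
 * is a user-controlled VM, which has no SCA), free the per-vcpu gmap of
 * ucontrol guests, release the CMMA buffer page (cbrlo) if one was
 * allocated in kvm_arch_vcpu_setup(), and finally free the SIE block and
 * the vcpu itself.
 */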
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (vcpu->arch.sie_block->cbrlo)
                __free_page(__pfn_to_page(
                                vcpu->arch.sie_block->cbrlo >> PAGE_SHIFT));
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

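/*
 * vcpu_load/vcpu_put implement lazy register switching: the host's
 * floating point and access registers are saved and the guest's loaded
 * when the vcpu is scheduled in, and the reverse happens when it is
 * scheduled out.  kvm_s390_vcpu_store_status() relies on this and
 * refreshes its copies before storing them.
 */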
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

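/*
 * First-time setup of the SIE control block.  The ecb/ecb2/eca bit
 * meanings below follow the SIE architecture; in particular, setting
 * ecb |= 0x10 when facilities 50 and 73 are present is understood to
 * enable transactional execution for the guest, and ecb2 |= 0x80 turns
 * on CMMA interpretation with cbrlo as its buffer page.
 */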
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct page *cbrl;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        if (kvm_enabled_cmma()) {
                cbrl = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (cbrl) {
                        vcpu->arch.sie_block->ecb2 |= 0x80;
                        vcpu->arch.sie_block->ecb2 &= ~0x08;
                        vcpu->arch.sie_block->cbrlo = page_to_phys(cbrl);
                }
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

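/*
 * Allocate and wire up a new vcpu: a zeroed sie_page holds both the SIE
 * control block and the interception TDB.  For non-ucontrol guests the
 * block is also entered into the SCA, through which SIE addresses the
 * other cpus of the configuration, and the local interrupt structure is
 * linked into the VM-wide floating interrupt list.
 */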
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

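/*
 * Blocking a vcpu sets PROG_BLOCK_SIE in the SIE program area, which
 * keeps the cpu from (re)entering SIE; it is used together with
 * exit_sie() below to synchronously stop a running vcpu
 * (see exit_sie_sync()).
 */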
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

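/*
 * IPTE notifier callback, registered in kvm_arch_hardware_setup().  When
 * a page of the guest address space is invalidated, every vcpu whose
 * prefix area covers that address is forced out of SIE with a
 * KVM_REQ_MMU_RELOAD request pending; kvm_s390_handle_requests() then
 * re-arms the notifier for the prefix pages before the vcpu reenters SIE.
 */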
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

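/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG accessors.  Each supported register
 * id maps to a field of the SIE control block; the value is copied
 * to or from the user buffer given in reg->addr.
 */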
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

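/*
 * vcpu_pre_run()/vcpu_post_run() bracket every trip through SIE in
 * __vcpu_run() below: pre_run reschedules if needed, handles pending
 * machine checks, delivers pending interrupts and processes requests;
 * post_run translates the SIE exit into either an in-kernel intercept
 * handler call or an exit to userspace.
 */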
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;
        } else {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

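/*
 * CMMA (collaborative memory management assist) is only switched on when
 * running in an LPAR and on hardware with EDAT1 (z10 and later), i.e. in
 * the environments where the assist is known to work.
 */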
bool kvm_enabled_cmma(void)
{
        if (!MACHINE_IS_LPAR)
                return false;
        /* only enable for z10 and later */
        if (!MACHINE_HAS_EDAT1)
                return false;
        return true;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are
         * protected.
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * must be no uaccess between guest_enter and guest_exit.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

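/*
 * Top-level KVM_RUN handler: syncs the register state that userspace may
 * have modified (PSW, prefix, control registers) into the SIE block,
 * runs the vcpu until an intercept needs userspace, and copies the
 * resulting state back into kvm_run before returning.
 */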
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

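/*
 * Helper for the store-status code below: copies @n bytes to the guest
 * either through copy_to_guest(), which applies prefixing, when @prefix
 * is set, or through copy_to_guest_absolute() otherwise.
 */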
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;
        u64 clkcomp;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &clkcomp, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. Memory slots have to start and end on a
           segment (1MB) boundary. The memory in userland may be fragmented
           into various different vmas, and it is okay to mmap() and
           munmap() stuff in this slot at any time after this call. */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f4fc2000UL;
        vfacilities[1] &= 0x005c000000000000UL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) vfacilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");