KVM: s390: Machine Check
[firefly-linux-kernel-4.4.55.git] / arch/s390/kvm/interrupt.c
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

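/*
 * I/O interrupt types encode css/ssid/schid directly in the type value;
 * all other floating interrupt types live in the 0xfffeXXXX/0xffffXXXX
 * range, so anything outside that range is an I/O interrupt.
 */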
static int is_ioint(u64 type)
{
        return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

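/*
 * Bits 2-4 of the I/O interruption word hold the interruption subclass
 * (ISC). Convert it to the corresponding subclass-mask bit, where ISC 0
 * maps to the most significant bit of the mask byte, as compared against
 * gcr[6] below.
 */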
static u64 int_word_to_isc_bits(u32 int_word)
{
        u8 isc = (int_word & 0x38000000) >> 27;

        return (0x80 >> isc) << 24;
}

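/*
 * A pending interrupt is deliverable when its class is enabled in the
 * PSW and, for external, I/O and machine check interrupts, the matching
 * subclass mask bit is set in CR0, CR6 or CR14 respectively.
 */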
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_CLOCK_COMP:
                return ckc_interrupts_enabled(vcpu);
        case KVM_S390_INT_CPU_TIMER:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
                        return 1;
                return 0;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[6] &
                    int_word_to_isc_bits(inti->io.io_int_word))
                        return 1;
                return 0;
        default:
                printk(KERN_WARNING "illegal interrupt type %llx\n",
                       inti->type);
                BUG();
        }
        return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.local_int.pending_irqs;
}

static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask = pending_local_irqs(vcpu);

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                          &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        vcpu->arch.sie_block->ictl |= ICTL_LPSW;
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR14;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_IO_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR6;
                break;
        default:
                BUG();
        }
}

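/*
 * Derive the instruction length code (ILC) of the interrupted
 * instruction: the ipa field holds the instruction's first halfword only
 * for the intercept codes below, while a program interruption intercept
 * provides the ILC directly in pgmilc.
 */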
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->icptcode) {
        case ICPT_INST:
        case ICPT_INSTPROGI:
        case ICPT_OPEREXC:
        case ICPT_PARTEXEC:
        case ICPT_IOINST:
                /* last instruction only stored for these icptcodes */
                return insn_length(vcpu->arch.sie_block->ipa >> 8);
        case ICPT_PROGI:
                return vcpu->arch.sie_block->pgmilc;
        default:
                return 0;
        }
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
                   0, ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

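/*
 * Deliver a local machine check: store the architected status into the
 * save areas of the guest's prefix page (plus the additional status area
 * for vector registers), then present the machine check interruption
 * code and swap the machine check old/new PSWs via the lowcore.
 */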
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk;
        unsigned long adtl_status_addr;
        int rc;

        spin_lock(&li->lock);
        mchk = li->irq.mchk;
        /*
         * If there was an exigent machine check pending, then any repressible
         * machine checks that might have been pending are indicated along
         * with it, so always clear both bits
         */
        clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
        memset(&li->irq.mchk, 0, sizeof(mchk));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk.mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk.cr14, mchk.mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
                            &adtl_status_addr, sizeof(unsigned long));
        rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr);
        rc |= put_guest_lc(vcpu, mchk.mcic,
                           (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk.fixed_logout, sizeof(mchk.fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct _lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilc = get_ilc(vcpu);

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   pgm_info.code, ilc);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
                kvm_s390_rewind_psw(vcpu, ilc);

        rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
                                          struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_DONE, 0,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
                                         struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
        vcpu->stat.deliver_io_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         ((__u32)inti->io.subchannel_id << 16) |
                                                inti->io.subchannel_nr,
                                         ((__u64)inti->io.io_int_parm << 32) |
                                                inti->io.io_int_word);

        rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
                           (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                           (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                           (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                           (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_mchk_info *mchk = &inti->mchk;
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk->mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk->cr14, mchk->mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk->mcic,
                        (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                        (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk->fixed_logout, sizeof(mchk->fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

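/* Delivery functions for vcpu-local interrupts, indexed by IRQ_PEND_* bit */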
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
                rc = __deliver_service(vcpu, inti);
                break;
        case KVM_S390_INT_PFAULT_DONE:
                rc = __deliver_pfault_done(vcpu, inti);
                break;
        case KVM_S390_INT_VIRTIO:
                rc = __deliver_virtio(vcpu, inti);
                break;
        case KVM_S390_MCHK:
                rc = __deliver_mchk_floating(vcpu, inti);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                rc = __deliver_io(vcpu, inti);
                break;
        default:
                BUG();
        }

        return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

        if (!sclp_has_sigpif())
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return (sigp_ctrl & SIGP_CTRL_C) &&
               (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *inti;
        int rc;

        rc = !!deliverable_local_irqs(vcpu);

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if (!rc && kvm_cpu_has_pending_timer(vcpu))
                rc = 1;

        /* external call pending and deliverable */
        if (!rc && kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                rc = 1;

        if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                rc = 1;

        return rc;
}

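/*
 * A clock comparator interrupt is pending once the guest TOD clock
 * (host TOD + guest epoch) has passed the programmed ckc value, provided
 * clock comparator interrupts are enabled.
 */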
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.sie_block->ckc <
              get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                return 0;
        if (!ckc_interrupts_enabled(vcpu))
                return 0;
        return 1;
}

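/*
 * Enabled wait: block the vcpu until an interrupt becomes deliverable.
 * If clock comparator interrupts are enabled, arm a host hrtimer so the
 * vcpu is woken up when the guest's clock comparator expires.
 */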
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        /* underflow */
        if (vcpu->arch.sie_block->ckc < now)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        if (waitqueue_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 now, sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (vcpu->arch.sie_block->ckc > now &&
            hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        /* clear pending external calls set by sigp interpretation facility */
        atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

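/*
 * Deliver all deliverable local interrupts in priority order (the
 * IRQ_PEND_* bits are ordered by interrupt priority), then walk the
 * floating interrupt list and deliver the first interrupt this vcpu
 * can accept.
 */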
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
        deliver_irq_t func;
        int deliver;
        int rc = 0;
        unsigned long irq_type;
        unsigned long deliverable_irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (kvm_cpu_has_pending_timer(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        do {
                deliverable_irqs = deliverable_local_irqs(vcpu);
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
                if (irq_type == IRQ_PEND_COUNT)
                        break;
                func = deliver_irq_funcs[irq_type];
                if (!func) {
                        WARN_ON_ONCE(func == NULL);
                        clear_bit(irq_type, &li->pending_irqs);
                        continue;
                }
                rc = func(vcpu);
        } while (!rc && irq_type != IRQ_PEND_COUNT);

        set_intercept_indicators_local(vcpu);

        if (!rc && atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        fi->irq_count--;
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                rc = __deliver_floating_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (!rc && deliver);
        }

        return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        li->irq.pgm = irq->u.pgm;
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
                                   0, 1);
        spin_lock(&li->lock);
        irq.u.pgm.code = code;
        __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                             struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;
        int rc;

        VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
                   pgm_info->code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   pgm_info->code, 0, 1);
        spin_lock(&li->lock);
        irq.u.pgm = *pgm_info;
        rc = __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
                   irq->u.ext.ext_params, irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2, 2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

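/*
 * With the SIGP interpretation facility, an external call is injected by
 * atomically setting the pending bit (SIGP_CTRL_C) and the source CPU
 * number in the vcpu's sigp_ctrl byte in the SCA; the hardware then
 * delivers the call to the guest without an intercept.
 */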
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
        unsigned char new_val, old_val;
        uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

        new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
        old_val = *sigp_ctrl & ~SIGP_CTRL_C;
        if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
                /* another external call is pending */
                return -EBUSY;
        }
        atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
        return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
        uint16_t src_id = irq->u.extcall.code;

        VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
                   src_id);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   src_id, 0, 2);

        /* sending vcpu invalid */
        if (src_id >= KVM_MAX_VCPUS ||
            kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
                return -EINVAL;

        if (sclp_has_sigpif())
                return __inject_extcall_sigpif(vcpu, src_id);

        /* only one external call may be pending per vcpu at a time */
        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0, 2);

        if (!is_vcpu_stopped(vcpu))
                return -EBUSY;

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0, 2);

        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic, 2);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

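/*
 * Dequeue a pending floating I/O interrupt, selected either by the ISC
 * mask bits (cr6) or by a specific subchannel id/nr (schid); exactly one
 * of the two selectors must be non-zero.
 */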
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 cr6, u64 schid)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;

        if ((!schid && !cr6) || (schid && cr6))
                return NULL;
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        inti = NULL;
        list_for_each_entry(iter, &fi->list, list) {
                if (!is_ioint(iter->type))
                        continue;
                if (cr6 &&
                    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
                        continue;
                if (schid) {
                        if (((schid & 0x00000000ffff0000) >> 16) !=
                            iter->io.subchannel_id)
                                continue;
                        if ((schid & 0x000000000000ffff) !=
                            iter->io.subchannel_nr)
                                continue;
                }
                inti = iter;
                break;
        }
        if (inti) {
                list_del_init(&inti->list);
                fi->irq_count--;
        }
        if (list_empty(&fi->list))
                atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        return inti;
}

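/*
 * Queue a floating interrupt on the global list (I/O interrupts are kept
 * sorted by ISC) and kick a vcpu to deliver it: prefer an idle vcpu,
 * otherwise pick one round-robin; the matching CPUSTAT flag requests the
 * interception.
 */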
1234 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1235 {
1236         struct kvm_s390_local_interrupt *li;
1237         struct kvm_s390_float_interrupt *fi;
1238         struct kvm_s390_interrupt_info *iter;
1239         struct kvm_vcpu *dst_vcpu = NULL;
1240         int sigcpu;
1241         int rc = 0;
1242
1243         fi = &kvm->arch.float_int;
1244         spin_lock(&fi->lock);
1245         if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
1246                 rc = -EINVAL;
1247                 goto unlock_fi;
1248         }
1249         fi->irq_count++;
1250         if (!is_ioint(inti->type)) {
1251                 list_add_tail(&inti->list, &fi->list);
1252         } else {
1253                 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
1254
1255                 /* Keep I/O interrupts sorted in isc order. */
1256                 list_for_each_entry(iter, &fi->list, list) {
1257                         if (!is_ioint(iter->type))
1258                                 continue;
1259                         if (int_word_to_isc_bits(iter->io.io_int_word)
1260                             <= isc_bits)
1261                                 continue;
1262                         break;
1263                 }
1264                 list_add_tail(&inti->list, &iter->list);
1265         }
1266         atomic_set(&fi->active, 1);
1267         if (atomic_read(&kvm->online_vcpus) == 0)
1268                 goto unlock_fi;
1269         sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
1270         if (sigcpu == KVM_MAX_VCPUS) {
1271                 do {
1272                         sigcpu = fi->next_rr_cpu++;
1273                         if (sigcpu == KVM_MAX_VCPUS)
1274                                 sigcpu = fi->next_rr_cpu = 0;
1275                 } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
1276         }
1277         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1278         li = &dst_vcpu->arch.local_int;
1279         spin_lock(&li->lock);
1280         switch (inti->type) {
1281         case KVM_S390_MCHK:
1282                 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
1283                 break;
1284         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1285                 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
1286                 break;
1287         default:
1288                 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1289                 break;
1290         }
1291         spin_unlock(&li->lock);
1292         kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
1293 unlock_fi:
1294         spin_unlock(&fi->lock);
1295         return rc;
1296 }
1297
1298 int kvm_s390_inject_vm(struct kvm *kvm,
1299                        struct kvm_s390_interrupt *s390int)
1300 {
1301         struct kvm_s390_interrupt_info *inti;
1302         int rc;
1303
1304         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1305         if (!inti)
1306                 return -ENOMEM;
1307
1308         inti->type = s390int->type;
1309         switch (inti->type) {
1310         case KVM_S390_INT_VIRTIO:
1311                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1312                          s390int->parm, s390int->parm64);
1313                 inti->ext.ext_params = s390int->parm;
1314                 inti->ext.ext_params2 = s390int->parm64;
1315                 break;
1316         case KVM_S390_INT_SERVICE:
1317                 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
1318                 inti->ext.ext_params = s390int->parm;
1319                 break;
1320         case KVM_S390_INT_PFAULT_DONE:
1321                 inti->ext.ext_params2 = s390int->parm64;
1322                 break;
1323         case KVM_S390_MCHK:
1324                 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
1325                          s390int->parm64);
1326                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1327                 inti->mchk.mcic = s390int->parm64;
1328                 break;
1329         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1330                 if (inti->type & IOINT_AI_MASK)
1331                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1332                 else
1333                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1334                                  s390int->type & IOINT_CSSID_MASK,
1335                                  s390int->type & IOINT_SSID_MASK,
1336                                  s390int->type & IOINT_SCHID_MASK);
1337                 inti->io.subchannel_id = s390int->parm >> 16;
1338                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1339                 inti->io.io_int_parm = s390int->parm64 >> 32;
1340                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1341                 break;
1342         default:
1343                 kfree(inti);
1344                 return -EINVAL;
1345         }
1346         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1347                                  2);
1348
1349         rc = __inject_vm(kvm, inti);
1350         if (rc)
1351                 kfree(inti);
1352         return rc;
1353 }
1354
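/*
 * Re-queue an I/O interrupt that was dequeued earlier (e.g. by the
 * TPI intercept handler) but could not be delivered to the guest.
 */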
1355 int kvm_s390_reinject_io_int(struct kvm *kvm,
1356                               struct kvm_s390_interrupt_info *inti)
1357 {
1358         return __inject_vm(kvm, inti);
1359 }
1360
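/*
 * Convert the legacy struct kvm_s390_interrupt ABI into the richer
 * struct kvm_s390_irq representation. Program, external call and
 * emergency interrupt codes must fit into 16 bits; excess bits in
 * parm are rejected with -EINVAL.
 */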
1361 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1362                        struct kvm_s390_irq *irq)
1363 {
1364         irq->type = s390int->type;
1365         switch (irq->type) {
1366         case KVM_S390_PROGRAM_INT:
1367                 if (s390int->parm & 0xffff0000)
1368                         return -EINVAL;
1369                 irq->u.pgm.code = s390int->parm;
1370                 break;
1371         case KVM_S390_SIGP_SET_PREFIX:
1372                 irq->u.prefix.address = s390int->parm;
1373                 break;
1374         case KVM_S390_SIGP_STOP:
1375                 irq->u.stop.flags = s390int->parm;
1376                 break;
1377         case KVM_S390_INT_EXTERNAL_CALL:
1378                 if (s390int->parm & 0xffff0000)
1379                         return -EINVAL;
1380                 irq->u.extcall.code = s390int->parm;
1381                 break;
1382         case KVM_S390_INT_EMERGENCY:
1383                 if (s390int->parm & 0xffff0000)
1384                         return -EINVAL;
1385                 irq->u.emerg.code = s390int->parm;
1386                 break;
1387         case KVM_S390_MCHK:
1388                 irq->u.mchk.mcic = s390int->parm64;
1389                 break;
1390         }
1391         return 0;
1392 }
1393
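/*
 * Check for a pending SIGP stop request on a vcpu; the helper after
 * it clears such a request under the local interrupt lock.
 */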
1394 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1395 {
1396         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1397
1398         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1399 }
1400
1401 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1402 {
1403         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1404
1405         spin_lock(&li->lock);
1406         li->irq.stop.flags = 0;
1407         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1408         spin_unlock(&li->lock);
1409 }
1410
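/*
 * Inject a local (per-vcpu) interrupt. Floating-only types such as
 * KVM_S390_INT_VIRTIO, KVM_S390_INT_SERVICE and the I/O range are
 * rejected with -EINVAL; those go through kvm_s390_inject_vm()
 * instead. On success the target vcpu is woken up so that it notices
 * the new interrupt.
 */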
1411 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1412 {
1413         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1414         int rc;
1415
1416         spin_lock(&li->lock);
1417         switch (irq->type) {
1418         case KVM_S390_PROGRAM_INT:
1419                 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
1420                            irq->u.pgm.code);
1421                 rc = __inject_prog(vcpu, irq);
1422                 break;
1423         case KVM_S390_SIGP_SET_PREFIX:
1424                 rc = __inject_set_prefix(vcpu, irq);
1425                 break;
1426         case KVM_S390_SIGP_STOP:
1427                 rc = __inject_sigp_stop(vcpu, irq);
1428                 break;
1429         case KVM_S390_RESTART:
1430                 rc = __inject_sigp_restart(vcpu, irq);
1431                 break;
1432         case KVM_S390_INT_CLOCK_COMP:
1433                 rc = __inject_ckc(vcpu);
1434                 break;
1435         case KVM_S390_INT_CPU_TIMER:
1436                 rc = __inject_cpu_timer(vcpu);
1437                 break;
1438         case KVM_S390_INT_EXTERNAL_CALL:
1439                 rc = __inject_extcall(vcpu, irq);
1440                 break;
1441         case KVM_S390_INT_EMERGENCY:
1442                 rc = __inject_sigp_emergency(vcpu, irq);
1443                 break;
1444         case KVM_S390_MCHK:
1445                 rc = __inject_mchk(vcpu, irq);
1446                 break;
1447         case KVM_S390_INT_PFAULT_INIT:
1448                 rc = __inject_pfault_init(vcpu, irq);
1449                 break;
1450         case KVM_S390_INT_VIRTIO:
1451         case KVM_S390_INT_SERVICE:
1452         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1453         default:
1454                 rc = -EINVAL;
1455         }
1456         spin_unlock(&li->lock);
1457         if (!rc)
1458                 kvm_s390_vcpu_wakeup(vcpu);
1459         return rc;
1460 }
1461
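/*
 * Drop all queued floating interrupts, e.g. for the
 * KVM_DEV_FLIC_CLEAR_IRQS attribute handled in flic_set_attr() below.
 */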
1462 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1463 {
1464         struct kvm_s390_float_interrupt *fi;
1465         struct kvm_s390_interrupt_info  *n, *inti = NULL;
1466
1467         fi = &kvm->arch.float_int;
1468         spin_lock(&fi->lock);
1469         list_for_each_entry_safe(inti, n, &fi->list, list) {
1470                 list_del(&inti->list);
1471                 kfree(inti);
1472         }
1473         fi->irq_count = 0;
1474         atomic_set(&fi->active, 0);
1475         spin_unlock(&fi->lock);
1476 }
1477
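/*
 * Translate one queued interrupt into the struct kvm_s390_irq layout
 * and copy it to the userspace buffer at @addr.
 */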
1478 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
1479                                    u8 *addr)
1480 {
1481         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1482         struct kvm_s390_irq irq = {0};
1483
1484         irq.type = inti->type;
1485         switch (inti->type) {
1486         case KVM_S390_INT_PFAULT_INIT:
1487         case KVM_S390_INT_PFAULT_DONE:
1488         case KVM_S390_INT_VIRTIO:
1489         case KVM_S390_INT_SERVICE:
1490                 irq.u.ext = inti->ext;
1491                 break;
1492         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1493                 irq.u.io = inti->io;
1494                 break;
1495         case KVM_S390_MCHK:
1496                 irq.u.mchk = inti->mchk;
1497                 break;
1498         default:
1499                 return -EINVAL;
1500         }
1501
1502         if (copy_to_user(uptr, &irq, sizeof(irq)))
1503                 return -EFAULT;
1504
1505         return 0;
1506 }
1507
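/*
 * Copy all currently queued floating interrupts into the userspace
 * buffer @buf of @len bytes. Returns the number of interrupts copied
 * or -ENOMEM if the buffer cannot hold the whole queue, in which case
 * userspace is expected to retry with a bigger buffer.
 */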
1508 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
1509 {
1510         struct kvm_s390_interrupt_info *inti;
1511         struct kvm_s390_float_interrupt *fi;
1512         int ret = 0;
1513         int n = 0;
1514
1515         fi = &kvm->arch.float_int;
1516         spin_lock(&fi->lock);
1517
1518         list_for_each_entry(inti, &fi->list, list) {
1519                 if (len < sizeof(struct kvm_s390_irq)) {
1520                         /* signal userspace to try again with a bigger buffer */
1521                         ret = -ENOMEM;
1522                         break;
1523                 }
1524                 ret = copy_irq_to_user(inti, buf);
1525                 if (ret)
1526                         break;
1527                 buf += sizeof(struct kvm_s390_irq);
1528                 len -= sizeof(struct kvm_s390_irq);
1529                 n++;
1530         }
1531
1532         spin_unlock(&fi->lock);
1533
1534         return ret < 0 ? ret : n;
1535 }
1536
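/*
 * Read-side attribute handler of the flic device; only
 * KVM_DEV_FLIC_GET_ALL_IRQS is supported. A minimal userspace sketch,
 * assuming "flic_fd" is the fd returned by KVM_CREATE_DEVICE for a
 * KVM_DEV_TYPE_FLIC device (not part of this file):
 *
 *	struct kvm_s390_irq buf[16];
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *		.attr  = sizeof(buf),	// buffer length in bytes
 *		.addr  = (__u64) buf,
 *	};
 *	int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */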
1537 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1538 {
1539         int r;
1540
1541         switch (attr->group) {
1542         case KVM_DEV_FLIC_GET_ALL_IRQS:
1543                 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
1544                                           attr->attr);
1545                 break;
1546         default:
1547                 r = -EINVAL;
1548         }
1549
1550         return r;
1551 }
1552
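/*
 * Read one struct kvm_s390_irq from userspace and fill in the kernel
 * interrupt block. Only the union member matching the interrupt type
 * is copied; unknown types are rejected with -EINVAL.
 */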
1553 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1554                                      u64 addr)
1555 {
1556         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1557         void *target = NULL;
1558         void __user *source;
1559         u64 size;
1560
1561         if (get_user(inti->type, (u64 __user *)addr))
1562                 return -EFAULT;
1563
1564         switch (inti->type) {
1565         case KVM_S390_INT_PFAULT_INIT:
1566         case KVM_S390_INT_PFAULT_DONE:
1567         case KVM_S390_INT_VIRTIO:
1568         case KVM_S390_INT_SERVICE:
1569                 target = (void *) &inti->ext;
1570                 source = &uptr->u.ext;
1571                 size = sizeof(inti->ext);
1572                 break;
1573         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1574                 target = (void *) &inti->io;
1575                 source = &uptr->u.io;
1576                 size = sizeof(inti->io);
1577                 break;
1578         case KVM_S390_MCHK:
1579                 target = (void *) &inti->mchk;
1580                 source = &uptr->u.mchk;
1581                 size = sizeof(inti->mchk);
1582                 break;
1583         default:
1584                 return -EINVAL;
1585         }
1586
1587         if (copy_from_user(target, source, size))
1588                 return -EFAULT;
1589
1590         return 0;
1591 }
1592
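/*
 * KVM_DEV_FLIC_ENQUEUE handler: inject a whole array of floating
 * interrupts in one call. attr->attr holds the buffer length in
 * bytes; it must be a multiple of sizeof(struct kvm_s390_irq) and at
 * most KVM_S390_FLIC_MAX_BUFFER. A minimal userspace sketch, assuming
 * "irqs" is an array of n prepared entries (not part of this file):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr  = n * sizeof(struct kvm_s390_irq),
 *		.addr  = (__u64) irqs,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */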
1593 static int enqueue_floating_irq(struct kvm_device *dev,
1594                                 struct kvm_device_attr *attr)
1595 {
1596         struct kvm_s390_interrupt_info *inti = NULL;
1597         int r = 0;
1598         int len = attr->attr;
1599
1600         if (len % sizeof(struct kvm_s390_irq) != 0)
1601                 return -EINVAL;
1602         else if (len > KVM_S390_FLIC_MAX_BUFFER)
1603                 return -EINVAL;
1604
1605         while (len >= sizeof(struct kvm_s390_irq)) {
1606                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1607                 if (!inti)
1608                         return -ENOMEM;
1609
1610                 r = copy_irq_from_user(inti, attr->addr);
1611                 if (r) {
1612                         kfree(inti);
1613                         return r;
1614                 }
1615                 r = __inject_vm(dev->kvm, inti);
1616                 if (r) {
1617                         kfree(inti);
1618                         return r;
1619                 }
1620                 len -= sizeof(struct kvm_s390_irq);
1621                 attr->addr += sizeof(struct kvm_s390_irq);
1622         }
1623
1624         return r;
1625 }
1626
1627 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1628 {
1629         if (id >= MAX_S390_IO_ADAPTERS)
1630                 return NULL;
1631         return kvm->arch.adapters[id];
1632 }
1633
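/*
 * KVM_DEV_FLIC_ADAPTER_REGISTER handler: allocate and initialize an
 * I/O adapter from the userspace-provided struct kvm_s390_io_adapter.
 * Each adapter id can only be registered once.
 */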
1634 static int register_io_adapter(struct kvm_device *dev,
1635                                struct kvm_device_attr *attr)
1636 {
1637         struct s390_io_adapter *adapter;
1638         struct kvm_s390_io_adapter adapter_info;
1639
1640         if (copy_from_user(&adapter_info,
1641                            (void __user *)attr->addr, sizeof(adapter_info)))
1642                 return -EFAULT;
1643
1644         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1645             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1646                 return -EINVAL;
1647
1648         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1649         if (!adapter)
1650                 return -ENOMEM;
1651
1652         INIT_LIST_HEAD(&adapter->maps);
1653         init_rwsem(&adapter->maps_lock);
1654         atomic_set(&adapter->nr_maps, 0);
1655         adapter->id = adapter_info.id;
1656         adapter->isc = adapter_info.isc;
1657         adapter->maskable = adapter_info.maskable;
1658         adapter->masked = false;
1659         adapter->swap = adapter_info.swap;
1660         dev->kvm->arch.adapters[adapter->id] = adapter;
1661
1662         return 0;
1663 }
1664
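/*
 * Set the masked state of an adapter. Returns the previous state
 * (0 or 1) on success, or -EINVAL if the adapter does not exist or is
 * not maskable.
 */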
1665 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1666 {
1667         int ret;
1668         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1669
1670         if (!adapter || !adapter->maskable)
1671                 return -EINVAL;
1672         ret = adapter->masked;
1673         adapter->masked = masked;
1674         return ret;
1675 }
1676
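/*
 * Map a guest indicator page for an adapter: translate the guest
 * address through the gmap, pin the backing page with
 * get_user_pages_fast() and remember the mapping. The number of
 * mappings per adapter is bounded by MAX_S390_ADAPTER_MAPS.
 */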
1677 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1678 {
1679         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1680         struct s390_map_info *map;
1681         int ret;
1682
1683         if (!adapter || !addr)
1684                 return -EINVAL;
1685
1686         map = kzalloc(sizeof(*map), GFP_KERNEL);
1687         if (!map) {
1688                 ret = -ENOMEM;
1689                 goto out;
1690         }
1691         INIT_LIST_HEAD(&map->list);
1692         map->guest_addr = addr;
1693         map->addr = gmap_translate(kvm->arch.gmap, addr);
1694         if (map->addr == -EFAULT) {
1695                 ret = -EFAULT;
1696                 goto out;
1697         }
1698         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1699         if (ret < 0)
1700                 goto out;
1701         BUG_ON(ret != 1);
1702         down_write(&adapter->maps_lock);
1703         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1704                 list_add_tail(&map->list, &adapter->maps);
1705                 ret = 0;
1706         } else {
1707                 put_page(map->page);
1708                 ret = -EINVAL;
1709         }
1710         up_write(&adapter->maps_lock);
1711 out:
1712         if (ret)
1713                 kfree(map);
1714         return ret;
1715 }
1716
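/*
 * Undo a previous kvm_s390_adapter_map(): unpin the page and free the
 * mapping entry identified by the guest address.
 */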
1717 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1718 {
1719         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1720         struct s390_map_info *map, *tmp;
1721         int found = 0;
1722
1723         if (!adapter || !addr)
1724                 return -EINVAL;
1725
1726         down_write(&adapter->maps_lock);
1727         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1728                 if (map->guest_addr == addr) {
1729                         found = 1;
1730                         atomic_dec(&adapter->nr_maps);
1731                         list_del(&map->list);
1732                         put_page(map->page);
1733                         kfree(map);
1734                         break;
1735                 }
1736         }
1737         up_write(&adapter->maps_lock);
1738
1739         return found ? 0 : -EINVAL;
1740 }
1741
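/* Tear down all registered adapters and their pinned mappings. */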
1742 void kvm_s390_destroy_adapters(struct kvm *kvm)
1743 {
1744         int i;
1745         struct s390_map_info *map, *tmp;
1746
1747         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1748                 if (!kvm->arch.adapters[i])
1749                         continue;
1750                 list_for_each_entry_safe(map, tmp,
1751                                          &kvm->arch.adapters[i]->maps, list) {
1752                         list_del(&map->list);
1753                         put_page(map->page);
1754                         kfree(map);
1755                 }
1756                 kfree(kvm->arch.adapters[i]);
1757         }
1758 }
1759
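/*
 * KVM_DEV_FLIC_ADAPTER_MODIFY handler: dispatch mask, map and unmap
 * requests for an already registered adapter.
 */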
1760 static int modify_io_adapter(struct kvm_device *dev,
1761                              struct kvm_device_attr *attr)
1762 {
1763         struct kvm_s390_io_adapter_req req;
1764         struct s390_io_adapter *adapter;
1765         int ret;
1766
1767         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1768                 return -EFAULT;
1769
1770         adapter = get_io_adapter(dev->kvm, req.id);
1771         if (!adapter)
1772                 return -EINVAL;
1773         switch (req.type) {
1774         case KVM_S390_IO_ADAPTER_MASK:
1775                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1776                 if (ret > 0)
1777                         ret = 0;
1778                 break;
1779         case KVM_S390_IO_ADAPTER_MAP:
1780                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1781                 break;
1782         case KVM_S390_IO_ADAPTER_UNMAP:
1783                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1784                 break;
1785         default:
1786                 ret = -EINVAL;
1787         }
1788
1789         return ret;
1790 }
1791
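/*
 * Write-side attribute handler of the flic device, covering interrupt
 * enqueueing, queue clearing, async page fault control and adapter
 * management.
 */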
1792 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1793 {
1794         int r = 0;
1795         unsigned int i;
1796         struct kvm_vcpu *vcpu;
1797
1798         switch (attr->group) {
1799         case KVM_DEV_FLIC_ENQUEUE:
1800                 r = enqueue_floating_irq(dev, attr);
1801                 break;
1802         case KVM_DEV_FLIC_CLEAR_IRQS:
1803                 kvm_s390_clear_float_irqs(dev->kvm);
1804                 break;
1805         case KVM_DEV_FLIC_APF_ENABLE:
1806                 dev->kvm->arch.gmap->pfault_enabled = 1;
1807                 break;
1808         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1809                 dev->kvm->arch.gmap->pfault_enabled = 0;
1810                 /*
1811                  * Make sure that no async faults are in transition when
1812                  * clearing the queues, so that we don't need to worry
1813                  * about late-arriving workers.
1814                  */
1815                 synchronize_srcu(&dev->kvm->srcu);
1816                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1817                         kvm_clear_async_pf_completion_queue(vcpu);
1818                 break;
1819         case KVM_DEV_FLIC_ADAPTER_REGISTER:
1820                 r = register_io_adapter(dev, attr);
1821                 break;
1822         case KVM_DEV_FLIC_ADAPTER_MODIFY:
1823                 r = modify_io_adapter(dev, attr);
1824                 break;
1825         default:
1826                 r = -EINVAL;
1827         }
1828
1829         return r;
1830 }
1831
1832 static int flic_create(struct kvm_device *dev, u32 type)
1833 {
1834         if (!dev)
1835                 return -EINVAL;
1836         if (dev->kvm->arch.flic)
1837                 return -EINVAL;
1838         dev->kvm->arch.flic = dev;
1839         return 0;
1840 }
1841
1842 static void flic_destroy(struct kvm_device *dev)
1843 {
1844         dev->kvm->arch.flic = NULL;
1845         kfree(dev);
1846 }
1847
1848 /* s390 floating irq controller (flic) */
1849 struct kvm_device_ops kvm_flic_ops = {
1850         .name = "kvm-flic",
1851         .get_attr = flic_get_attr,
1852         .set_attr = flic_set_attr,
1853         .create = flic_create,
1854         .destroy = flic_destroy,
1855 };
1856
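/*
 * Compute the bit number of an indicator bit within its page: each
 * byte of offset into the page contributes 8 bits, plus the bit
 * offset itself. With @swap set, the result is XORed with
 * BITS_PER_LONG - 1 so that the generic bitops, which count from the
 * least significant bit of each long, touch the storage in the
 * MSB-first order the indicators use. Worked example on a 64-bit
 * build: offset 0, bit_nr 0, swap=true yields bit 63, i.e. the most
 * significant bit of the page's first 64-bit word.
 */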
1857 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1858 {
1859         unsigned long bit;
1860
1861         bit = bit_nr + (addr % PAGE_SIZE) * 8;
1862
1863         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1864 }
1865
1866 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1867                                           u64 addr)
1868 {
1869         struct s390_map_info *map;
1870
1871         if (!adapter)
1872                 return NULL;
1873
1874         list_for_each_entry(map, &adapter->maps, list) {
1875                 if (map->guest_addr == addr)
1876                         return map;
1877         }
1878         return NULL;
1879 }
1880
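/*
 * Set the adapter's device indicator bit, then test-and-set the
 * summary indicator; both pages are marked dirty for migration.
 * Returns 1 if the summary bit was newly set (an interrupt needs to
 * be injected), 0 if it was already set (the interrupt is coalesced)
 * and -1 if one of the indicator pages is not mapped.
 */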
1881 static int adapter_indicators_set(struct kvm *kvm,
1882                                   struct s390_io_adapter *adapter,
1883                                   struct kvm_s390_adapter_int *adapter_int)
1884 {
1885         unsigned long bit;
1886         int summary_set, idx;
1887         struct s390_map_info *info;
1888         void *map;
1889
1890         info = get_map_info(adapter, adapter_int->ind_addr);
1891         if (!info)
1892                 return -1;
1893         map = page_address(info->page);
1894         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
1895         set_bit(bit, map);
1896         idx = srcu_read_lock(&kvm->srcu);
1897         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1898         set_page_dirty_lock(info->page);
1899         info = get_map_info(adapter, adapter_int->summary_addr);
1900         if (!info) {
1901                 srcu_read_unlock(&kvm->srcu, idx);
1902                 return -1;
1903         }
1904         map = page_address(info->page);
1905         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
1906                           adapter->swap);
1907         summary_set = test_and_set_bit(bit, map);
1908         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1909         set_page_dirty_lock(info->page);
1910         srcu_read_unlock(&kvm->srcu, idx);
1911         return summary_set ? 0 : 1;
1912 }
1913
1914 /*
1915  * < 0 - not injected due to error
1916  * = 0 - coalesced, summary indicator already active
1917  * > 0 - injected interrupt
1918  */
1919 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1920                            struct kvm *kvm, int irq_source_id, int level,
1921                            bool line_status)
1922 {
1923         int ret;
1924         struct s390_io_adapter *adapter;
1925
1926         /* We're only interested in the 0->1 transition. */
1927         if (!level)
1928                 return 0;
1929         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1930         if (!adapter)
1931                 return -1;
1932         down_read(&adapter->maps_lock);
1933         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1934         up_read(&adapter->maps_lock);
1935         if ((ret > 0) && !adapter->masked) {
1936                 struct kvm_s390_interrupt s390int = {
1937                         .type = KVM_S390_INT_IO(1, 0, 0, 0),
1938                         .parm = 0,
1939                         .parm64 = (adapter->isc << 27) | 0x80000000,
1940                 };
1941                 ret = kvm_s390_inject_vm(kvm, &s390int);
1942                 if (ret == 0)
1943                         ret = 1;
1944         }
1945         return ret;
1946 }
1947
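/*
 * Translate a userspace routing entry (KVM_SET_GSI_ROUTING) into the
 * kernel representation; only adapter routes are supported on s390.
 * A minimal userspace sketch for one route, assuming a suitably sized
 * "routing" buffer and the adapter/indicator values from the setup
 * above (none of these names are part of this file):
 *
 *	struct kvm_irq_routing_entry *e = &routing->entries[0];
 *	e->gsi = gsi;
 *	e->type = KVM_IRQ_ROUTING_S390_ADAPTER;
 *	e->u.adapter.summary_addr = summary_addr;
 *	e->u.adapter.ind_addr = ind_addr;
 *	e->u.adapter.summary_offset = summary_offset;
 *	e->u.adapter.ind_offset = ind_offset;
 *	e->u.adapter.adapter_id = adapter_id;
 *	ioctl(vm_fd, KVM_SET_GSI_ROUTING, routing);
 */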
1948 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1949                           const struct kvm_irq_routing_entry *ue)
1950 {
1951         int ret;
1952
1953         switch (ue->type) {
1954         case KVM_IRQ_ROUTING_S390_ADAPTER:
1955                 e->set = set_adapter_int;
1956                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1957                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1958                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1959                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1960                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1961                 ret = 0;
1962                 break;
1963         default:
1964                 ret = -EINVAL;
1965         }
1966
1967         return ret;
1968 }
1969
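/* MSI routing is not supported on s390. */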
1970 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
1971                 int irq_source_id, int level, bool line_status)
1972 {
1973         return -EINVAL;
1974 }