KVM: s390: store the breaking-event address on pgm interrupts
[firefly-linux-kernel-4.4.55.git] / arch / s390 / kvm / interrupt.c
1 /*
2  * handling kvm guest interrupts
3  *
4  * Copyright IBM Corp. 2008, 2015
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  */
12
13 #include <linux/interrupt.h>
14 #include <linux/kvm_host.h>
15 #include <linux/hrtimer.h>
16 #include <linux/mmu_context.h>
17 #include <linux/signal.h>
18 #include <linux/slab.h>
19 #include <linux/bitmap.h>
20 #include <asm/asm-offsets.h>
21 #include <asm/dis.h>
22 #include <asm/uaccess.h>
23 #include <asm/sclp.h>
24 #include "kvm-s390.h"
25 #include "gaccess.h"
26 #include "trace-s390.h"
27
28 #define IOINT_SCHID_MASK 0x0000ffff
29 #define IOINT_SSID_MASK 0x00030000
30 #define IOINT_CSSID_MASK 0x03fc0000
31 #define IOINT_AI_MASK 0x04000000
32 #define PFAULT_INIT 0x0600
33 #define PFAULT_DONE 0x0680
34 #define VIRTIO_PARAM 0x0d00
35
36 static int is_ioint(u64 type)
37 {
38         return ((type & 0xfffe0000u) != 0xfffe0000u);
39 }
40
41 int psw_extint_disabled(struct kvm_vcpu *vcpu)
42 {
43         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
44 }
45
46 static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
47 {
48         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
49 }
50
51 static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
52 {
53         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
54 }
55
56 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
57 {
58         if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
59             (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
60             (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
61                 return 0;
62         return 1;
63 }
64
65 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
66 {
67         if (psw_extint_disabled(vcpu) ||
68             !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
69                 return 0;
70         if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
71                 /* No timer interrupts when single stepping */
72                 return 0;
73         return 1;
74 }
75
76 static u64 int_word_to_isc_bits(u32 int_word)
77 {
78         u8 isc = (int_word & 0x38000000) >> 27;
79
80         return (0x80 >> isc) << 24;
81 }
82
/*
 * Decide whether the floating interrupt @inti can be delivered to @vcpu
 * right now, based on the guest PSW interrupt masks and the subclass
 * mask bits in the guest control registers.
 *
 * Returns 1 if deliverable, 0 if currently masked; BUGs on an unknown
 * interrupt type.
 */
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		/* CR0: external-call subclass mask */
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		/* CR0: emergency-signal subclass mask */
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		/* CR0: CPU-timer subclass mask */
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		/* CR0: service-signal subclass mask (shared by all four) */
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		/* these types are never maskable */
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		/* CR14 carries the machine-check subclass mask */
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		/* CR6 carries the I/O interruption subclass mask */
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}
141
142 static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
143 {
144         return vcpu->arch.local_int.pending_irqs;
145 }
146
/*
 * Reduce the pending local interrupt bitmap to the interrupts that are
 * deliverable right now, masking out everything disabled by the guest
 * PSW or by the subclass mask bits in guest CR0.
 */
static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask = pending_local_irqs(vcpu);

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	/* CR0 external-call subclass mask */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	/* CR0 emergency-signal subclass mask */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	/* CR0 clock-comparator subclass mask */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	/* CR0 CPU-timer subclass mask */
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
172
173 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
174 {
175         atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
176         set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
177 }
178
179 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
180 {
181         atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
182         clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
183 }
184
/*
 * Drop all interrupt-related intercept requests for this vcpu: clear
 * the I/O, external and stop cpuflags, the control-register load
 * controls and the LPSW/STCTL/PINT intercept controls.  If guest
 * debugging is active, re-arm the controls the debug code relies on.
 */
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		/* CR9-CR11 hold the PER control/start/end; CR0 for ext gating */
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}
198
199 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
200 {
201         atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
202 }
203
204 static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
205 {
206         if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
207                 return;
208         if (psw_extint_disabled(vcpu))
209                 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
210         else
211                 vcpu->arch.sie_block->lctl |= LCTL_CR0;
212 }
213
214 static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
215 {
216         if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
217                 return;
218         if (psw_mchk_disabled(vcpu))
219                 vcpu->arch.sie_block->ictl |= ICTL_LPSW;
220         else
221                 vcpu->arch.sie_block->lctl |= LCTL_CR14;
222 }
223
224 static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
225 {
226         if (kvm_s390_is_stop_irq_pending(vcpu))
227                 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
228 }
229
/*
 * Set up intercept requests for all pending local interrupts that are
 * currently not deliverable, so we get control back once the guest
 * unmasks them.
 */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}
237
/*
 * Arrange to regain control for a floating interrupt @inti that is
 * currently masked: either request an intercept via cpuflags/ictl or
 * watch the control register whose subclass bits gate delivery.
 * BUGs on interrupt types that are never maskable.
 */
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			/* watch CR0 for the subclass mask being enabled */
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			/* watch CR14 (machine-check subclass mask) */
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			/* watch CR6 (I/O interruption subclass mask) */
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}
266
/*
 * Return the instruction length code (in bytes) to report with a
 * program interrupt, derived from the current SIE intercept.  Returns 0
 * for intercepts where no instruction information is available.
 */
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		/* hardware already recorded the ILC for us */
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
283
/*
 * Deliver a CPU-timer external interrupt: store the interrupt code and
 * old PSW into the guest lowcore, load the external new PSW, and clear
 * the pending bit.  Returns -EFAULT on any guest-access failure.
 */
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
302
/*
 * Deliver a clock-comparator external interrupt via the guest lowcore
 * and clear the pending bit.  Returns -EFAULT on guest-access failure.
 */
static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
321
/*
 * Deliver a pfault-init external interrupt (service-signal code with
 * PFAULT_INIT as cpu address).  The pending state is snapshotted and
 * cleared under the local-interrupt lock before touching guest memory.
 */
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	/* ext_params2 carries the pfault token for the guest */
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
349
/*
 * Deliver a pending local machine check: snapshot and clear the pending
 * state under the lock, store the vcpu status and machine-check
 * information into the guest lowcore, then load the machine-check new
 * PSW.  Returns -EFAULT on guest-access failure.
 */
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk;
	unsigned long adtl_status_addr;
	int rc;

	spin_lock(&li->lock);
	mchk = li->irq.mchk;
	/*
	 * If there was an exigent machine check pending, then any repressible
	 * machine checks that might have been pending are indicated along
	 * with it, so always clear both bits
	 */
	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	memset(&li->irq.mchk, 0, sizeof(mchk));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk.mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk.cr14, mchk.mcic);

	/* store status (registers etc.) at the prefixed save areas */
	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
			    &adtl_status_addr, sizeof(unsigned long));
	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr);
	rc |= put_guest_lc(vcpu, mchk.mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
390
/*
 * Deliver a CPU-restart interrupt: save the old PSW and load the
 * restart new PSW from the guest lowcore, then clear the pending bit.
 */
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}
408
/*
 * Handle a pending SIGP set-prefix: snapshot and clear the request
 * under the lock, then install the new prefix.  Cannot fail.
 */
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}
429
/*
 * Deliver one pending SIGP emergency signal.  Multiple senders may be
 * recorded in the sigp_emerg_pending bitmap; one sender is consumed per
 * call and the IRQ_PEND bit is only cleared once the bitmap is empty.
 */
static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	/* the sending cpu's address is reported as the ext cpu addr */
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
457
/*
 * Deliver a pending SIGP external call: snapshot and clear the request
 * under the lock, then present the interrupt via the guest lowcore with
 * the caller's cpu address in the ext cpu-addr field.
 */
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
485
/*
 * Deliver a pending program interrupt: store the interruption-specific
 * fields that the architecture defines for each program-interruption
 * code (translation-exception code, access ids, monitor data, ...) into
 * the guest lowcore, handle PER information, rewind the PSW for
 * nullifying conditions, and swap in the program new PSW.
 */
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	/* snapshot and clear the pending state under the lock */
	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	/* store the per-code additional information; PER bit handled below */
	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	/* a PER event may accompany any program interruption */
	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	/*
	 * Nullifying conditions must point the old PSW back at the
	 * failing instruction; only needed when we emulated it ourselves
	 * (instruction intercept), not when hardware raised the event.
	 */
	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	/* propagate the breaking-event address (see commit subject) */
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
				 (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
598
/*
 * Deliver a floating service-signal (SCLP) interrupt with its external
 * parameter word.  Returns -EFAULT on guest-access failure.
 */
static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc ? -EFAULT : 0;
}
620
/*
 * Deliver a pfault-done completion interrupt (service-signal code with
 * PFAULT_DONE as cpu address, token in ext_params2).
 */
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
640
/*
 * Deliver a virtio notification interrupt (service-signal code with
 * VIRTIO_PARAM as cpu address and both parameter words filled in).
 */
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
665
/*
 * Deliver an I/O interrupt: store subchannel id/nr, interruption
 * parameter and interruption word into the guest lowcore and swap in
 * the I/O new PSW.  Returns -EFAULT on guest-access failure.
 */
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);

	rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
			   (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
693
/*
 * Deliver a floating machine check.  Unlike the local variant, the
 * pending state lives in @inti (managed by the caller), so no local
 * lock handling happens here.
 */
static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			(u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			(u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
718
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

/*
 * Delivery callbacks for local interrupts, indexed by IRQ_PEND_* bit
 * number.  Bits without an entry here (e.g. IRQ_PEND_SIGP_STOP) are
 * handled via intercepts rather than active delivery — presumably also
 * IRQ_PEND_MCHK_REP, which is folded into the MCHK_EX handler; verify
 * against the deliverable_local_irqs() masking.
 */
static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};
732
/*
 * Dispatch a floating interrupt to its type-specific delivery routine.
 * BUGs on types that are not floating interrupts.
 */
static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_mchk_floating(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}

	return rc;
}
760
761 /* Check whether an external call is pending (deliverable or not) */
762 int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
763 {
764         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
765         uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
766
767         if (!sclp_has_sigpif())
768                 return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
769
770         return (sigp_ctrl & SIGP_CTRL_C) &&
771                (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
772 }
773
/*
 * Check whether any interrupt is both pending and deliverable for this
 * vcpu: local interrupts, floating interrupts, the clock comparator,
 * external calls and (unless @exclude_stop is set) SIGP stop requests.
 * Returns nonzero if at least one deliverable interrupt exists.
 */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *inti;
        int rc;

        rc = !!deliverable_local_irqs(vcpu);

        /* scan the floating list only while it is marked active */
        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if (!rc && kvm_cpu_has_pending_timer(vcpu))
                rc = 1;

        /* external call pending and deliverable */
        if (!rc && kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                rc = 1;

        if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                rc = 1;

        return rc;
}
806
807 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
808 {
809         if (!(vcpu->arch.sie_block->ckc <
810               get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
811                 return 0;
812         if (!ckc_interrupts_enabled(vcpu))
813                 return 0;
814         return 1;
815 }
816
/*
 * Handle a guest wait state: block the vcpu until an interrupt becomes
 * deliverable, arming the ckc hrtimer if a clock comparator wakeup is
 * needed.  Returns 0 on success, -EOPNOTSUPP for a disabled wait (all
 * interrupt classes masked in the guest PSW).
 */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        /* ckc masked off: sleep without arming a timer */
        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        /* underflow */
        if (vcpu->arch.sie_block->ckc < now)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        /* drop the kvm srcu read lock across the (sleeping) block */
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        /* cancel is safe even if the timer was never started above */
        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}
857
858 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
859 {
860         if (waitqueue_active(&vcpu->wq)) {
861                 /*
862                  * The vcpu gave up the cpu voluntarily, mark it as a good
863                  * yield-candidate.
864                  */
865                 vcpu->preempted = true;
866                 wake_up_interruptible(&vcpu->wq);
867                 vcpu->stat.halt_wakeup++;
868         }
869 }
870
/*
 * hrtimer callback for the clock comparator timer armed in
 * kvm_s390_handle_wait(): wake the idle vcpu, or re-arm the timer if
 * we fired before the guest TOD clock actually reached the ckc value.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 now, sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (vcpu->arch.sie_block->ckc > now &&
            hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}
890
/*
 * Drop all local (per-vcpu) interrupts: pending bits, the emergency
 * signal bitmap, the saved payloads, and any external call set up via
 * the sigp interpretation facility (SCA sigp_ctrl byte).
 */
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        /* clear pending external calls set by sigp interpretation facility */
        atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}
905
/*
 * Deliver all currently deliverable interrupts to the guest: first the
 * local interrupts, in the priority order of the IRQ_PEND_* bits, via
 * the deliver_irq_funcs table, then deliverable floating interrupts.
 * Returns 0 or a negative error from a delivery routine.
 */
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
        deliver_irq_t func;
        int deliver;
        int rc = 0;
        unsigned long irq_type;
        unsigned long deliverable_irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (kvm_cpu_has_pending_timer(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        do {
                deliverable_irqs = deliverable_local_irqs(vcpu);
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
                if (irq_type == IRQ_PEND_COUNT)
                        break;
                func = deliver_irq_funcs[irq_type];
                if (!func) {
                        /* pending bit without a handler: drop it, warn once */
                        WARN_ON_ONCE(func == NULL);
                        clear_bit(irq_type, &li->pending_irqs);
                        continue;
                }
                rc = func(vcpu);
        } while (!rc && irq_type != IRQ_PEND_COUNT);

        set_intercept_indicators_local(vcpu);

        if (!rc && atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        /*
                         * Dequeue one deliverable entry under fi->lock, then
                         * deliver and free it after dropping the lock.
                         */
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        fi->irq_count--;
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                rc = __deliver_floating_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (!rc && deliver);
        }

        return rc;
}
966
967 static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
968 {
969         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
970
971         li->irq.pgm = irq->u.pgm;
972         set_bit(IRQ_PEND_PROG, &li->pending_irqs);
973         return 0;
974 }
975
976 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
977 {
978         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
979         struct kvm_s390_irq irq;
980
981         VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
982         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
983                                    0, 1);
984         spin_lock(&li->lock);
985         irq.u.pgm.code = code;
986         __inject_prog(vcpu, &irq);
987         BUG_ON(waitqueue_active(li->wq));
988         spin_unlock(&li->lock);
989         return 0;
990 }
991
992 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
993                              struct kvm_s390_pgm_info *pgm_info)
994 {
995         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
996         struct kvm_s390_irq irq;
997         int rc;
998
999         VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
1000                    pgm_info->code);
1001         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
1002                                    pgm_info->code, 0, 1);
1003         spin_lock(&li->lock);
1004         irq.u.pgm = *pgm_info;
1005         rc = __inject_prog(vcpu, &irq);
1006         BUG_ON(waitqueue_active(li->wq));
1007         spin_unlock(&li->lock);
1008         return rc;
1009 }
1010
/*
 * Queue a pfault init external interrupt for this vcpu; the caller
 * holds li->lock.  The payload is stored before the pending bit and
 * the cpuflag are set, in that order.
 */
static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
                   irq->u.ext.ext_params, irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2, 2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}
1026
/*
 * Inject an external call via the sigp interpretation facility by
 * setting the C bit and source cpu number in the vcpu's SCA sigp_ctrl
 * byte.  The cmpxchg only succeeds if no call is currently marked
 * pending (C bit clear); otherwise -EBUSY is returned.
 */
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
        unsigned char new_val, old_val;
        uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

        new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
        old_val = *sigp_ctrl & ~SIGP_CTRL_C;
        if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
                /* another external call is pending */
                return -EBUSY;
        }
        atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
        return 0;
}
1041
1042 static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1043 {
1044         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1045         struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
1046         uint16_t src_id = irq->u.extcall.code;
1047
1048         VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
1049                    src_id);
1050         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
1051                                    src_id, 0, 2);
1052
1053         /* sending vcpu invalid */
1054         if (src_id >= KVM_MAX_VCPUS ||
1055             kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
1056                 return -EINVAL;
1057
1058         if (sclp_has_sigpif())
1059                 return __inject_extcall_sigpif(vcpu, src_id);
1060
1061         if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
1062                 return -EBUSY;
1063         *extcall = irq->u.extcall;
1064         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1065         return 0;
1066 }
1067
1068 static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1069 {
1070         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1071         struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
1072
1073         VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
1074                    irq->u.prefix.address);
1075         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
1076                                    irq->u.prefix.address, 0, 2);
1077
1078         if (!is_vcpu_stopped(vcpu))
1079                 return -EBUSY;
1080
1081         *prefix = irq->u.prefix;
1082         set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
1083         return 0;
1084 }
1085
/* Only the store-status flag is supported for SIGP stop requests. */
#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
/*
 * Queue a SIGP stop request.  An already stopped vcpu only gets the
 * (optional) status store; otherwise the stop becomes pending unless
 * one already is (-EBUSY).  Unsupported flags yield -EINVAL.
 */
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                /* already stopped: just perform the requested status store */
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        return 0;
}
1111
1112 static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
1113                                  struct kvm_s390_irq *irq)
1114 {
1115         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1116
1117         VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
1118         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
1119
1120         set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
1121         return 0;
1122 }
1123
1124 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1125                                    struct kvm_s390_irq *irq)
1126 {
1127         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1128
1129         VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
1130                    irq->u.emerg.code);
1131         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1132                                    irq->u.emerg.code, 0, 2);
1133
1134         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1135         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1136         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1137         return 0;
1138 }
1139
/*
 * Queue a machine check for this vcpu, accumulating it into any
 * already pending one; the caller holds li->lock.  Exigent conditions
 * (MCHK_EX_MASK) take priority over repressible ones (MCHK_REP_MASK).
 */
static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic, 2);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
        return 0;
}
1170
1171 static int __inject_ckc(struct kvm_vcpu *vcpu)
1172 {
1173         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1174
1175         VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
1176         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1177                                    0, 0, 2);
1178
1179         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1180         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1181         return 0;
1182 }
1183
1184 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1185 {
1186         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1187
1188         VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
1189         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1190                                    0, 0, 2);
1191
1192         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1193         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1194         return 0;
1195 }
1196
1197
/*
 * Dequeue and return the first queued I/O interrupt matching either
 * the isc mask in cr6 or the given subchannel id (exactly one of the
 * two must be non-zero, otherwise NULL is returned).  The caller owns
 * the returned element and must free or reinject it.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 cr6, u64 schid)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;

        if ((!schid && !cr6) || (schid && cr6))
                return NULL;
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        inti = NULL;
        list_for_each_entry(iter, &fi->list, list) {
                if (!is_ioint(iter->type))
                        continue;
                if (cr6 &&
                    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
                        continue;
                if (schid) {
                        /* schid encodes subchannel id in bits 32-47, nr in 48-63 */
                        if (((schid & 0x00000000ffff0000) >> 16) !=
                            iter->io.subchannel_id)
                                continue;
                        if ((schid & 0x000000000000ffff) !=
                            iter->io.subchannel_nr)
                                continue;
                }
                inti = iter;
                break;
        }
        if (inti) {
                list_del_init(&inti->list);
                fi->irq_count--;
        }
        if (list_empty(&fi->list))
                atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        return inti;
}
1235
/*
 * Enqueue a floating interrupt (takes ownership of @inti on success)
 * and kick a vcpu to process it: an idle vcpu if one exists, otherwise
 * the next vcpu round-robin.  Returns -EINVAL when the queue is full.
 * Locking: fi->lock, with li->lock of the chosen vcpu nested inside.
 */
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *iter;
        struct kvm_vcpu *dst_vcpu = NULL;
        int sigcpu;
        int rc = 0;

        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
                rc = -EINVAL;
                goto unlock_fi;
        }
        fi->irq_count++;
        if (!is_ioint(inti->type)) {
                list_add_tail(&inti->list, &fi->list);
        } else {
                u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

                /* Keep I/O interrupts sorted in isc order. */
                list_for_each_entry(iter, &fi->list, list) {
                        if (!is_ioint(iter->type))
                                continue;
                        if (int_word_to_isc_bits(iter->io.io_int_word)
                            <= isc_bits)
                                continue;
                        break;
                }
                list_add_tail(&inti->list, &iter->list);
        }
        atomic_set(&fi->active, 1);
        if (atomic_read(&kvm->online_vcpus) == 0)
                goto unlock_fi;
        /* prefer an idle vcpu, else pick the next one round-robin */
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
        }
        dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
        li = &dst_vcpu->arch.local_int;
        spin_lock(&li->lock);
        /* request the intercept matching the interrupt class */
        switch (inti->type) {
        case KVM_S390_MCHK:
                atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
                break;
        default:
                atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
                break;
        }
        spin_unlock(&li->lock);
        kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
        spin_unlock(&fi->lock);
        return rc;
}
1299
/*
 * Translate a userspace kvm_s390_interrupt into an interrupt_info and
 * enqueue it as a floating interrupt.  Returns -EINVAL for types that
 * cannot be injected as floating interrupts, -ENOMEM on allocation
 * failure, or the result of __inject_vm() (which takes ownership of
 * the allocation on success).
 */
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_interrupt_info *inti;
        int rc;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = s390int->type;
        switch (inti->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_INT_PFAULT_DONE:
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_MCHK:
                VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
                         s390int->parm64);
                inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
                inti->mchk.mcic = s390int->parm64;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (inti->type & IOINT_AI_MASK)
                        VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
                else
                        VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
                                 s390int->type & IOINT_CSSID_MASK,
                                 s390int->type & IOINT_SSID_MASK,
                                 s390int->type & IOINT_SCHID_MASK);
                /* parm carries subchannel id/nr, parm64 the int parm/word */
                inti->io.subchannel_id = s390int->parm >> 16;
                inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
                inti->io.io_int_parm = s390int->parm64 >> 32;
                inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
                break;
        default:
                kfree(inti);
                return -EINVAL;
        }
        trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
                                 2);

        rc = __inject_vm(kvm, inti);
        if (rc)
                kfree(inti);
        return rc;
}
1356
/*
 * Requeue an I/O interrupt previously dequeued with
 * kvm_s390_get_io_int() back onto the floating interrupt list;
 * ownership of @inti passes back to the list on success.
 */
int kvm_s390_reinject_io_int(struct kvm *kvm,
                              struct kvm_s390_interrupt_info *inti)
{
        return __inject_vm(kvm, inti);
}
1362
1363 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1364                        struct kvm_s390_irq *irq)
1365 {
1366         irq->type = s390int->type;
1367         switch (irq->type) {
1368         case KVM_S390_PROGRAM_INT:
1369                 if (s390int->parm & 0xffff0000)
1370                         return -EINVAL;
1371                 irq->u.pgm.code = s390int->parm;
1372                 break;
1373         case KVM_S390_SIGP_SET_PREFIX:
1374                 irq->u.prefix.address = s390int->parm;
1375                 break;
1376         case KVM_S390_SIGP_STOP:
1377                 irq->u.stop.flags = s390int->parm;
1378                 break;
1379         case KVM_S390_INT_EXTERNAL_CALL:
1380                 if (s390int->parm & 0xffff0000)
1381                         return -EINVAL;
1382                 irq->u.extcall.code = s390int->parm;
1383                 break;
1384         case KVM_S390_INT_EMERGENCY:
1385                 if (s390int->parm & 0xffff0000)
1386                         return -EINVAL;
1387                 irq->u.emerg.code = s390int->parm;
1388                 break;
1389         case KVM_S390_MCHK:
1390                 irq->u.mchk.mcic = s390int->parm64;
1391                 break;
1392         }
1393         return 0;
1394 }
1395
1396 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1397 {
1398         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1399
1400         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1401 }
1402
1403 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1404 {
1405         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1406
1407         spin_lock(&li->lock);
1408         li->irq.stop.flags = 0;
1409         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1410         spin_unlock(&li->lock);
1411 }
1412
/*
 * Inject a local (per-vcpu) interrupt, dispatching to the matching
 * __inject_* helper under li->lock, and wake the vcpu on success.
 * Floating-only types (virtio, service, I/O) are rejected with
 * -EINVAL; other errors come from the individual helpers.
 */
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        spin_lock(&li->lock);
        switch (irq->type) {
        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           irq->u.pgm.code);
                rc = __inject_prog(vcpu, irq);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                rc = __inject_set_prefix(vcpu, irq);
                break;
        case KVM_S390_SIGP_STOP:
                rc = __inject_sigp_stop(vcpu, irq);
                break;
        case KVM_S390_RESTART:
                rc = __inject_sigp_restart(vcpu, irq);
                break;
        case KVM_S390_INT_CLOCK_COMP:
                rc = __inject_ckc(vcpu);
                break;
        case KVM_S390_INT_CPU_TIMER:
                rc = __inject_cpu_timer(vcpu);
                break;
        case KVM_S390_INT_EXTERNAL_CALL:
                rc = __inject_extcall(vcpu, irq);
                break;
        case KVM_S390_INT_EMERGENCY:
                rc = __inject_sigp_emergency(vcpu, irq);
                break;
        case KVM_S390_MCHK:
                rc = __inject_mchk(vcpu, irq);
                break;
        case KVM_S390_INT_PFAULT_INIT:
                rc = __inject_pfault_init(vcpu, irq);
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        default:
                rc = -EINVAL;
        }
        spin_unlock(&li->lock);
        if (!rc)
                kvm_s390_vcpu_wakeup(vcpu);
        return rc;
}
1463
1464 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1465 {
1466         struct kvm_s390_float_interrupt *fi;
1467         struct kvm_s390_interrupt_info  *n, *inti = NULL;
1468
1469         fi = &kvm->arch.float_int;
1470         spin_lock(&fi->lock);
1471         list_for_each_entry_safe(inti, n, &fi->list, list) {
1472                 list_del(&inti->list);
1473                 kfree(inti);
1474         }
1475         fi->irq_count = 0;
1476         atomic_set(&fi->active, 0);
1477         spin_unlock(&fi->lock);
1478 }
1479
/*
 * Convert a kernel interrupt_info into the uapi kvm_s390_irq layout
 * and copy it to the userspace address @addr.  Returns -EINVAL for
 * types with no uapi representation, -EFAULT on copy failure.
 * NOTE(review): copy_to_user() may fault and sleep; callers must not
 * hold a spinlock across this helper.
 */
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
                                   u8 *addr)
{
        struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
        struct kvm_s390_irq irq = {0};

        irq.type = inti->type;
        switch (inti->type) {
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
                irq.u.ext = inti->ext;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                irq.u.io = inti->io;
                break;
        case KVM_S390_MCHK:
                irq.u.mchk = inti->mchk;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uptr, &irq, sizeof(irq)))
                return -EFAULT;

        return 0;
}
1509
1510 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
1511 {
1512         struct kvm_s390_interrupt_info *inti;
1513         struct kvm_s390_float_interrupt *fi;
1514         int ret = 0;
1515         int n = 0;
1516
1517         fi = &kvm->arch.float_int;
1518         spin_lock(&fi->lock);
1519
1520         list_for_each_entry(inti, &fi->list, list) {
1521                 if (len < sizeof(struct kvm_s390_irq)) {
1522                         /* signal userspace to try again */
1523                         ret = -ENOMEM;
1524                         break;
1525                 }
1526                 ret = copy_irq_to_user(inti, buf);
1527                 if (ret)
1528                         break;
1529                 buf += sizeof(struct kvm_s390_irq);
1530                 len -= sizeof(struct kvm_s390_irq);
1531                 n++;
1532         }
1533
1534         spin_unlock(&fi->lock);
1535
1536         return ret < 0 ? ret : n;
1537 }
1538
1539 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1540 {
1541         int r;
1542
1543         switch (attr->group) {
1544         case KVM_DEV_FLIC_GET_ALL_IRQS:
1545                 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
1546                                           attr->attr);
1547                 break;
1548         default:
1549                 r = -EINVAL;
1550         }
1551
1552         return r;
1553 }
1554
1555 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1556                                      u64 addr)
1557 {
1558         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1559         void *target = NULL;
1560         void __user *source;
1561         u64 size;
1562
1563         if (get_user(inti->type, (u64 __user *)addr))
1564                 return -EFAULT;
1565
1566         switch (inti->type) {
1567         case KVM_S390_INT_PFAULT_INIT:
1568         case KVM_S390_INT_PFAULT_DONE:
1569         case KVM_S390_INT_VIRTIO:
1570         case KVM_S390_INT_SERVICE:
1571                 target = (void *) &inti->ext;
1572                 source = &uptr->u.ext;
1573                 size = sizeof(inti->ext);
1574                 break;
1575         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1576                 target = (void *) &inti->io;
1577                 source = &uptr->u.io;
1578                 size = sizeof(inti->io);
1579                 break;
1580         case KVM_S390_MCHK:
1581                 target = (void *) &inti->mchk;
1582                 source = &uptr->u.mchk;
1583                 size = sizeof(inti->mchk);
1584                 break;
1585         default:
1586                 return -EINVAL;
1587         }
1588
1589         if (copy_from_user(target, source, size))
1590                 return -EFAULT;
1591
1592         return 0;
1593 }
1594
1595 static int enqueue_floating_irq(struct kvm_device *dev,
1596                                 struct kvm_device_attr *attr)
1597 {
1598         struct kvm_s390_interrupt_info *inti = NULL;
1599         int r = 0;
1600         int len = attr->attr;
1601
1602         if (len % sizeof(struct kvm_s390_irq) != 0)
1603                 return -EINVAL;
1604         else if (len > KVM_S390_FLIC_MAX_BUFFER)
1605                 return -EINVAL;
1606
1607         while (len >= sizeof(struct kvm_s390_irq)) {
1608                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1609                 if (!inti)
1610                         return -ENOMEM;
1611
1612                 r = copy_irq_from_user(inti, attr->addr);
1613                 if (r) {
1614                         kfree(inti);
1615                         return r;
1616                 }
1617                 r = __inject_vm(dev->kvm, inti);
1618                 if (r) {
1619                         kfree(inti);
1620                         return r;
1621                 }
1622                 len -= sizeof(struct kvm_s390_irq);
1623                 attr->addr += sizeof(struct kvm_s390_irq);
1624         }
1625
1626         return r;
1627 }
1628
1629 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1630 {
1631         if (id >= MAX_S390_IO_ADAPTERS)
1632                 return NULL;
1633         return kvm->arch.adapters[id];
1634 }
1635
1636 static int register_io_adapter(struct kvm_device *dev,
1637                                struct kvm_device_attr *attr)
1638 {
1639         struct s390_io_adapter *adapter;
1640         struct kvm_s390_io_adapter adapter_info;
1641
1642         if (copy_from_user(&adapter_info,
1643                            (void __user *)attr->addr, sizeof(adapter_info)))
1644                 return -EFAULT;
1645
1646         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1647             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1648                 return -EINVAL;
1649
1650         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1651         if (!adapter)
1652                 return -ENOMEM;
1653
1654         INIT_LIST_HEAD(&adapter->maps);
1655         init_rwsem(&adapter->maps_lock);
1656         atomic_set(&adapter->nr_maps, 0);
1657         adapter->id = adapter_info.id;
1658         adapter->isc = adapter_info.isc;
1659         adapter->maskable = adapter_info.maskable;
1660         adapter->masked = false;
1661         adapter->swap = adapter_info.swap;
1662         dev->kvm->arch.adapters[adapter->id] = adapter;
1663
1664         return 0;
1665 }
1666
1667 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1668 {
1669         int ret;
1670         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1671
1672         if (!adapter || !adapter->maskable)
1673                 return -EINVAL;
1674         ret = adapter->masked;
1675         adapter->masked = masked;
1676         return ret;
1677 }
1678
/*
 * Map the guest page at @addr as an adapter interrupt indicator area
 * for adapter @id: translate the guest address, pin the backing page
 * and publish the mapping on the adapter's map list so indicator bits
 * can later be set without faulting.
 *
 * Returns 0 on success, -EINVAL for a bad adapter/address or when the
 * per-adapter map limit is exceeded, -EFAULT if the guest address
 * cannot be translated, -ENOMEM on allocation failure.
 */
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	/* translate the guest address into a host userspace address */
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	/* pin the backing page so later indicator updates cannot fault */
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	/* NOTE(review): a zero return (no page pinned) would trip this
	 * BUG_ON from a guest/userspace-reachable path — verify that
	 * get_user_pages_fast() cannot return 0 here */
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	/* publish only if we stayed under the map limit; otherwise undo
	 * the pin (NOTE(review): nr_maps is not decremented on this
	 * overflow path — confirm whether that is intentional) */
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	/* on any failure the entry was never published; free it */
	if (ret)
		kfree(map);
	return ret;
}
1718
1719 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1720 {
1721         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1722         struct s390_map_info *map, *tmp;
1723         int found = 0;
1724
1725         if (!adapter || !addr)
1726                 return -EINVAL;
1727
1728         down_write(&adapter->maps_lock);
1729         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1730                 if (map->guest_addr == addr) {
1731                         found = 1;
1732                         atomic_dec(&adapter->nr_maps);
1733                         list_del(&map->list);
1734                         put_page(map->page);
1735                         kfree(map);
1736                         break;
1737                 }
1738         }
1739         up_write(&adapter->maps_lock);
1740
1741         return found ? 0 : -EINVAL;
1742 }
1743
1744 void kvm_s390_destroy_adapters(struct kvm *kvm)
1745 {
1746         int i;
1747         struct s390_map_info *map, *tmp;
1748
1749         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1750                 if (!kvm->arch.adapters[i])
1751                         continue;
1752                 list_for_each_entry_safe(map, tmp,
1753                                          &kvm->arch.adapters[i]->maps, list) {
1754                         list_del(&map->list);
1755                         put_page(map->page);
1756                         kfree(map);
1757                 }
1758                 kfree(kvm->arch.adapters[i]);
1759         }
1760 }
1761
1762 static int modify_io_adapter(struct kvm_device *dev,
1763                              struct kvm_device_attr *attr)
1764 {
1765         struct kvm_s390_io_adapter_req req;
1766         struct s390_io_adapter *adapter;
1767         int ret;
1768
1769         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1770                 return -EFAULT;
1771
1772         adapter = get_io_adapter(dev->kvm, req.id);
1773         if (!adapter)
1774                 return -EINVAL;
1775         switch (req.type) {
1776         case KVM_S390_IO_ADAPTER_MASK:
1777                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1778                 if (ret > 0)
1779                         ret = 0;
1780                 break;
1781         case KVM_S390_IO_ADAPTER_MAP:
1782                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1783                 break;
1784         case KVM_S390_IO_ADAPTER_UNMAP:
1785                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1786                 break;
1787         default:
1788                 ret = -EINVAL;
1789         }
1790
1791         return ret;
1792 }
1793
/*
 * Handler for the FLIC KVM_SET_DEVICE_ATTR ioctl.
 *
 * Dispatches on attr->group: enqueue floating interrupts, clear all
 * pending interrupts, enable/disable pfault (async page fault)
 * handling, or register/modify I/O adapters.
 * Returns 0 on success or a negative error code.
 */
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		/* disable first, then wait out in-flight faults */
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
1833
1834 static int flic_create(struct kvm_device *dev, u32 type)
1835 {
1836         if (!dev)
1837                 return -EINVAL;
1838         if (dev->kvm->arch.flic)
1839                 return -EINVAL;
1840         dev->kvm->arch.flic = dev;
1841         return 0;
1842 }
1843
/* Tear down the flic device and drop the VM's reference to it. */
static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}
1849
/*
 * s390 floating irq controller (flic): the KVM device through which
 * userspace queries, injects and clears floating interrupts and
 * manages I/O adapters.
 */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
1858
1859 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1860 {
1861         unsigned long bit;
1862
1863         bit = bit_nr + (addr % PAGE_SIZE) * 8;
1864
1865         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1866 }
1867
1868 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1869                                           u64 addr)
1870 {
1871         struct s390_map_info *map;
1872
1873         if (!adapter)
1874                 return NULL;
1875
1876         list_for_each_entry(map, &adapter->maps, list) {
1877                 if (map->guest_addr == addr)
1878                         return map;
1879         }
1880         return NULL;
1881 }
1882
/*
 * Set the adapter-local indicator bit and the summary indicator bit
 * for @adapter_int in the guest pages mapped by @adapter.
 *
 * Returns -1 when one of the indicator pages is not mapped, 0 when
 * the summary bit was already set (interrupt can be coalesced) and
 * 1 when the summary bit was newly set (interrupt must be injected).
 */
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	/* device indicator bit first */
	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	/* then the summary indicator (set after the device indicator —
	 * presumably so the guest's scan sees a consistent state;
	 * NOTE(review): confirm the ordering requirement) */
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	/* test_and_set: a bit already set means the irq can be coalesced */
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
1915
1916 /*
1917  * < 0 - not injected due to error
1918  * = 0 - coalesced, summary indicator already active
1919  * > 0 - injected interrupt
1920  */
1921 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1922                            struct kvm *kvm, int irq_source_id, int level,
1923                            bool line_status)
1924 {
1925         int ret;
1926         struct s390_io_adapter *adapter;
1927
1928         /* We're only interested in the 0->1 transition. */
1929         if (!level)
1930                 return 0;
1931         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1932         if (!adapter)
1933                 return -1;
1934         down_read(&adapter->maps_lock);
1935         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1936         up_read(&adapter->maps_lock);
1937         if ((ret > 0) && !adapter->masked) {
1938                 struct kvm_s390_interrupt s390int = {
1939                         .type = KVM_S390_INT_IO(1, 0, 0, 0),
1940                         .parm = 0,
1941                         .parm64 = (adapter->isc << 27) | 0x80000000,
1942                 };
1943                 ret = kvm_s390_inject_vm(kvm, &s390int);
1944                 if (ret == 0)
1945                         ret = 1;
1946         }
1947         return ret;
1948 }
1949
1950 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1951                           const struct kvm_irq_routing_entry *ue)
1952 {
1953         int ret;
1954
1955         switch (ue->type) {
1956         case KVM_IRQ_ROUTING_S390_ADAPTER:
1957                 e->set = set_adapter_int;
1958                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1959                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1960                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1961                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1962                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1963                 ret = 0;
1964                 break;
1965         default:
1966                 ret = -EINVAL;
1967         }
1968
1969         return ret;
1970 }
1971
/* MSI routing is not supported on s390; always reject it. */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}