arch/s390/kvm/interrupt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
        int c, scn;

        if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
                return 0;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl sigp_ctrl =
                        sca->cpu[vcpu->vcpu_id].sigp_ctrl;

                c = sigp_ctrl.c;
                scn = sigp_ctrl.scn;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (src_id)
                *src_id = scn;

        return c;
}

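/*
 * Inject an external call by rewriting the SCA entry with a single
 * cmpxchg(): old_val is constructed with c == 0, so the exchange only
 * succeeds if no other external call is already pending for this VCPU.
 */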
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
        int expect, rc;

        BUG_ON(!kvm_s390_use_sca_entries());
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;

                expect = old_val.value;
                rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);

        if (rc != expect) {
                /* another external call is pending */
                return -EBUSY;
        }
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        return 0;
}

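/* Atomically clear a pending external call from this VCPU's SCA entry. */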
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
        int rc, expect;

        if (!kvm_s390_use_sca_entries())
                return;
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union esca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
                union bsca_sigp_ctrl old = *sigp_ctrl;

                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
        WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        return psw_extint_disabled(vcpu) &&
               psw_ioint_disabled(vcpu) &&
               psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
                return 0;
        return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        return !psw_extint_disabled(vcpu) &&
               (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
        if (!cpu_timer_interrupts_enabled(vcpu))
                return 0;
        return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
        return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
                (irq_type <= IRQ_PEND_IO_ISC_0));
}

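/*
 * Convert an interruption subclass (0-7) to its mask bit in CR6,
 * e.g. isc_to_isc_bits(0) == 0x80000000 and
 * isc_to_isc_bits(7) == 0x01000000.
 */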
static uint64_t isc_to_isc_bits(int isc)
{
        return (0x80 >> isc) << 24;
}

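/*
 * Build an adapter I/O interruption word: the leftmost bit
 * (0x80000000) is set and the 3-bit ISC lands in the 0x38000000
 * field, e.g. isc_to_int_word(3) == 0x98000000.
 */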
static inline u32 isc_to_int_word(u8 isc)
{
        return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
        return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
        return READ_ONCE(gisa->ipm);
}

static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
        return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

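/*
 * Merge the floating and local pending bitmaps with the GISA IPM:
 * shifting the eight IPM bits up by IRQ_PEND_IO_ISC_7 places ISC 0
 * (the IPM's most significant bit) at IRQ_PEND_IO_ISC_0.
 */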
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.float_int.pending_irqs |
                vcpu->arch.local_int.pending_irqs |
                kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}

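/*
 * Delivery picks the highest pending bit first, so ISC 0 (highest
 * priority) maps to IRQ_PEND_IO_ISC_0, the top of the I/O range,
 * and ISC 7 to the bottom (IRQ_PEND_IO_ISC_7).
 */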
static inline int isc_to_irq_type(unsigned long isc)
{
        return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
        return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
                                   unsigned long active_mask)
{
        int i;

        for (i = 0; i <= MAX_ISC; i++)
                if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
                        active_mask &= ~(1UL << (isc_to_irq_type(i)));

        return active_mask;
}

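/*
 * Reduce the set of pending interrupts to those that are currently
 * deliverable, honoring the PSW interruption masks and the subclass
 * masks in CR0, CR6 and CR14.
 */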
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask;

        active_mask = pending_irqs(vcpu);
        if (!active_mask)
                return 0;

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (psw_ioint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_IO_MASK;
        else
                active_mask = disable_iscs(vcpu, active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                __clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;
        /*
         * Check both the floating and the local interrupts' cr14 because
         * bit IRQ_PEND_MCHK_REP could be set in both cases.
         */
        if (!(vcpu->arch.sie_block->gcr[14] &
           (vcpu->kvm->arch.float_int.mchk.cr14 |
           vcpu->arch.local_int.irq.mchk.cr14)))
                __clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
        set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
        clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
                                      CPUSTAT_STOP_INT);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
                return;
        else if (psw_ioint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_io(vcpu);
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
                   ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __write_machine_check(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_mchk_info *mchk)
{
        unsigned long ext_sa_addr;
        unsigned long lc;
        freg_t fprs[NUM_FPRS];
        union mci mci;
        int rc;

        mci.val = mchk->mcic;
        /* take care of lazy register loading */
        save_fpu_regs();
        save_access_regs(vcpu->run->s.regs.acrs);
        if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
                save_gs_cb(current->thread.gs_cb);

        /* Extended save area */
        rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
                           sizeof(unsigned long));
        /* Only bits 0 through 63-LC are used for address formation */
        lc = ext_sa_addr & MCESA_LC_MASK;
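        /*
         * With facility 133, LC selects the alignment of the extended
         * save area: 0 and 10 mean 1K, 11 means 2K, 12 means 4K; all
         * other values are invalid.
         */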
        if (test_kvm_facility(vcpu->kvm, 133)) {
                switch (lc) {
                case 0:
                case 10:
                        ext_sa_addr &= ~0x3ffUL;
                        break;
                case 11:
                        ext_sa_addr &= ~0x7ffUL;
                        break;
                case 12:
                        ext_sa_addr &= ~0xfffUL;
                        break;
                default:
                        ext_sa_addr = 0;
                        break;
                }
        } else {
                ext_sa_addr &= ~0x3ffUL;
        }

        if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
                if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
                                    512))
                        mci.vr = 0;
        } else {
                mci.vr = 0;
        }
        if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
            && (lc == 11 || lc == 12)) {
                if (write_guest_abs(vcpu, ext_sa_addr + 1024,
                                    &vcpu->run->s.regs.gscb, 32))
                        mci.gs = 0;
        } else {
                mci.gs = 0;
        }

        /* General interruption information */
        rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

        /* Register-save areas */
        if (MACHINE_HAS_VX) {
                convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
        } else {
                rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
                                     vcpu->run->s.regs.fprs, 128);
        }
        rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
                             vcpu->run->s.regs.gprs, 128);
        rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
                           (u32 __user *) __LC_FP_CREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
                           (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
        rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
                           (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
                           (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
        rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
                             &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
                             &vcpu->arch.sie_block->gcr, 128);

        /* Extended interruption information */
        rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
                           (u32 __user *) __LC_EXT_DAMAGE_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
                             sizeof(mchk->fixed_logout));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk = {};
        int deliver = 0;
        int rc = 0;

        spin_lock(&fi->lock);
        spin_lock(&li->lock);
        if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
            test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
                /*
                 * If there was an exigent machine check pending, then any
                 * repressible machine checks that might have been pending
                 * are indicated along with it, so always clear bits for
                 * repressible and exigent interrupts
                 */
                mchk = li->irq.mchk;
                clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
                clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
                memset(&li->irq.mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        /*
         * We indicate floating repressible conditions along with
         * other pending conditions. Channel Report Pending and Channel
         * Subsystem damage are the only two and are indicated by
         * bits in mcic and masked in cr14.
         */
        if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
                mchk.mcic |= fi->mchk.mcic;
                mchk.cr14 |= fi->mchk.cr14;
                memset(&fi->mchk, 0, sizeof(mchk));
                deliver = 1;
        }
        spin_unlock(&li->lock);
        spin_unlock(&fi->lock);

        if (deliver) {
                VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
                           mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_MCHK,
                                                 mchk.cr14, mchk.mcic);
                rc = __write_machine_check(vcpu, &mchk);
        }
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
        u16 ilen;

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
        VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
                   pgm_info.code, ilen);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
                nullifying = true;
                /* fall through */
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                nullifying = true;
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_STACK_FULL:
        case PGM_STACK_EMPTY:
        case PGM_STACK_SPECIFICATION:
        case PGM_STACK_TYPE:
        case PGM_STACK_OPERATION:
        case PGM_TRACE_TABEL:
        case PGM_CRYPTO_OPERATION:
                nullifying = true;
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
                kvm_s390_rewind_psw(vcpu, ilen);

        /* bit 1+2 of the target are the ilc, so we can directly use ilen */
        rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_ext_info ext;
        int rc = 0;

        spin_lock(&fi->lock);
        if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
                spin_unlock(&fi->lock);
                return 0;
        }
        ext = fi->srv_signal;
        memset(&fi->srv_signal, 0, sizeof(ext));
        clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
                   ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
                                         ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);

        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_PFAULT] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
                clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                                 KVM_S390_INT_PFAULT_DONE, 0,
                                                 inti->ext.ext_params2);
                VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
                           inti->ext.ext_params2);

                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, PFAULT_DONE,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = 0;

        spin_lock(&fi->lock);
        inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                VCPU_EVENT(vcpu, 4,
                           "deliver: virtio parm: 0x%x,parm64: 0x%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                inti->ext.ext_params,
                                inti->ext.ext_params2);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
        }
        if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
                clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
                                (u16 *)__LC_EXT_INT_CODE);
                rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
                                (u16 *)__LC_EXT_CPU_ADDR);
                rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                                &vcpu->arch.sie_block->gpsw,
                                sizeof(psw_t));
                rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                                (u32 *)__LC_EXT_PARAMS);
                rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                                (u64 *)__LC_EXT_PARAMS2);
                kfree(inti);
        }
        return rc ? -EFAULT : 0;
}

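/* Write the I/O interruption parameters and swap the I/O PSWs. */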
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
        int rc;

        rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw,
                             sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     unsigned long irq_type)
{
        struct list_head *isc_list;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti = NULL;
        struct kvm_s390_io_info io;
        u32 isc;
        int rc = 0;

        fi = &vcpu->kvm->arch.float_int;

        spin_lock(&fi->lock);
        isc = irq_type_to_isc(irq_type);
        isc_list = &fi->lists[isc];
        inti = list_first_entry_or_null(isc_list,
                                        struct kvm_s390_interrupt_info,
                                        list);
        if (inti) {
                if (inti->type & KVM_S390_INT_IO_AI_MASK)
                        VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
                else
                        VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
                        inti->io.subchannel_id >> 8,
                        inti->io.subchannel_id >> 1 & 0x3,
                        inti->io.subchannel_nr);

                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                inti->type,
                                ((__u32)inti->io.subchannel_id << 16) |
                                inti->io.subchannel_nr,
                                ((__u64)inti->io.io_int_parm << 32) |
                                inti->io.io_int_word);
                list_del(&inti->list);
                fi->counters[FIRQ_CNTR_IO] -= 1;
        }
        if (list_empty(isc_list))
                clear_bit(irq_type, &fi->pending_irqs);
        spin_unlock(&fi->lock);

        if (inti) {
                rc = __do_deliver_io(vcpu, &(inti->io));
                kfree(inti);
                goto out;
        }

        if (vcpu->kvm->arch.gisa &&
            kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
                /*
                 * If an adapter interrupt was not delivered while in SIE
                 * context, KVM handles the delivery here.
                 */
                VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
                memset(&io, 0, sizeof(io));
                io.io_int_word = isc_to_int_word(isc);
                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                        KVM_S390_INT_IO(1, 0, 0, 0),
                        ((__u32)io.subchannel_id << 16) |
                        io.subchannel_nr,
                        ((__u64)io.io_int_parm << 32) |
                        io.io_int_word);
                rc = __do_deliver_io(vcpu, &io);
        }
out:
        return rc;
}

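/*
 * Delivery callbacks, indexed by IRQ_PEND bit number. I/O interrupts
 * are handled separately by __deliver_io().
 */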
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
        [IRQ_PEND_EXT_SERVICE]    = __deliver_service,
        [IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
        [IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        if (!sclp.has_sigpif)
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        if (deliverable_irqs(vcpu))
                return 1;

        if (kvm_cpu_has_pending_timer(vcpu))
                return 1;

        /* external call pending and deliverable */
        if (kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                return 1;

        if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                return 1;
        return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

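/*
 * Compute how long the VCPU may sleep: the time until the clock
 * comparator fires, the remaining CPU timer, or the minimum of both,
 * depending on which interrupt sources are enabled. Returns 0 if an
 * interrupt is already due.
 */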
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
        u64 now, cputm, sltime = 0;

        if (ckc_interrupts_enabled(vcpu)) {
                now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
                sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
                /* already expired or overflow? */
                if (!sltime || vcpu->arch.sie_block->ckc <= now)
                        return 0;
                if (cpu_timer_interrupts_enabled(vcpu)) {
                        cputm = kvm_s390_get_cpu_timer(vcpu);
                        /* already expired? */
                        if (cputm >> 63)
                                return 0;
                        return min(sltime, tod_to_ns(cputm));
                }
        } else if (cpu_timer_interrupts_enabled(vcpu)) {
                sltime = kvm_s390_get_cpu_timer(vcpu);
                /* already expired? */
                if (sltime >> 63)
                        return 0;
        }
        return sltime;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu) &&
            !cpu_timer_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        sltime = __calculate_sltime(vcpu);
        if (!sltime)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        /*
         * We cannot move this into the if, as the CPU might already be
         * in kvm_vcpu_block without having the waitqueue set (polling)
         */
        vcpu->valid_wakeup = true;
        /*
         * This is mostly to document that the read in swait_active could
         * be moved before other stores, leading to subtle races.
         * All current users do not store or use an atomic-like update
         */
        smp_mb__after_atomic();
        if (swait_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                swake_up(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
        /*
         * The VCPU might not be sleeping but is executing the VSIE. Let's
         * kick it, so it leaves the SIE to process the request.
         */
        kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        sltime = __calculate_sltime(vcpu);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        sca_clear_ext_call(vcpu);
}

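/*
 * Deliver all deliverable pending interrupts to the guest, highest
 * priority first, until none are left or a delivery fails.
 */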
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        deliver_irq_t func;
        int rc = 0;
        unsigned long irq_type;
        unsigned long irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (ckc_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        /* pending cpu timer conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        if (cpu_timer_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the reverse order of interrupt priority */
                irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
                if (is_ioirq(irq_type)) {
                        rc = __deliver_io(vcpu, irq_type);
                } else {
                        func = deliver_irq_funcs[irq_type];
                        if (!func) {
                                WARN_ON_ONCE(func == NULL);
                                clear_bit(irq_type, &li->pending_irqs);
                                continue;
                        }
                        rc = func(vcpu);
                }
        }

        set_intercept_indicators(vcpu);

        return rc;
}

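/*
 * Merge a new program interrupt with one that may already be pending:
 * a pure PER event only touches the PER fields, a non-PER event only
 * the non-PER fields, and an event carrying both replaces everything.
 */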
static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   irq->u.pgm.code, 0);

        if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
                /* auto detection if no valid ILC was given */
                irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
                irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
                irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
        }

        if (irq->u.pgm.code == PGM_PER) {
                li->irq.pgm.code |= PGM_PER;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify PER related information */
                li->irq.pgm.per_address = irq->u.pgm.per_address;
                li->irq.pgm.per_code = irq->u.pgm.per_code;
                li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
                li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
        } else if (!(irq->u.pgm.code & PGM_PER)) {
                li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
                                   irq->u.pgm.code;
                li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify non-PER information */
                li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
                li->irq.pgm.mon_code = irq->u.pgm.mon_code;
                li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
                li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
                li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
                li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
        } else {
                li->irq.pgm = irq->u.pgm;
        }
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
                   irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
        uint16_t src_id = irq->u.extcall.code;

        VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
                   src_id);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   src_id, 0);
        /* reject if the sending vcpu does not exist */
        if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
                return -EINVAL;

        if (sclp.has_sigpif)
                return sca_inject_ext_call(vcpu, src_id);

        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0);

        if (!is_vcpu_stopped(vcpu))
                return -EBUSY;

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0);
1381         /* sending vcpu invalid */
1382         if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1383                 return -EINVAL;
1384
1385         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1386         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1387         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1388         return 0;
1389 }
1390
1391 static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1392 {
1393         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1394         struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
1395
1396         VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
1397                    irq->u.mchk.mcic);
1398         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
1399                                    irq->u.mchk.mcic);
1400
1401         /*
1402          * Because repressible machine checks can be indicated along with
1403          * exigent machine checks (PoP, Chapter 11, Interruption action),
1404          * we need to combine cr14, mcic and the external damage code.
1405          * The failing storage address and the logout area should not be
1406          * ORed together; we just indicate the last occurrence of the
1407          * corresponding machine check.
1408          */
1409         mchk->cr14 |= irq->u.mchk.cr14;
1410         mchk->mcic |= irq->u.mchk.mcic;
1411         mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
1412         mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
1413         memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
1414                sizeof(mchk->fixed_logout));
1415         if (mchk->mcic & MCHK_EX_MASK)
1416                 set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
1417         else if (mchk->mcic & MCHK_REP_MASK)
1418                 set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
1419         return 0;
1420 }
1421
1422 static int __inject_ckc(struct kvm_vcpu *vcpu)
1423 {
1424         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1425
1426         VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
1427         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
1428                                    0, 0);
1429
1430         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
1431         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1432         return 0;
1433 }
1434
1435 static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
1436 {
1437         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1438
1439         VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
1440         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
1441                                    0, 0);
1442
1443         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
1444         kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
1445         return 0;
1446 }
1447
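/*
 * Dequeue the first classic I/O interrupt pending for @isc. A non-zero
 * @schid (subchannel id in the upper 16 bits, subchannel number in the
 * lower 16 bits) restricts the search to that subchannel. The returned
 * entry has been removed from the floating interrupt list and is owned
 * by the caller; NULL is returned if nothing matches.
 */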
1448 static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
1449                                                   int isc, u32 schid)
1450 {
1451         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1452         struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1453         struct kvm_s390_interrupt_info *iter;
1454         u16 id = (schid & 0xffff0000U) >> 16;
1455         u16 nr = schid & 0x0000ffffU;
1456
1457         spin_lock(&fi->lock);
1458         list_for_each_entry(iter, isc_list, list) {
1459                 if (schid && (id != iter->io.subchannel_id ||
1460                               nr != iter->io.subchannel_nr))
1461                         continue;
1462                 /* found an appropriate entry */
1463                 list_del_init(&iter->list);
1464                 fi->counters[FIRQ_CNTR_IO] -= 1;
1465                 if (list_empty(isc_list))
1466                         clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1467                 spin_unlock(&fi->lock);
1468                 return iter;
1469         }
1470         spin_unlock(&fi->lock);
1471         return NULL;
1472 }
1473
1474 static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
1475                                                       u64 isc_mask, u32 schid)
1476 {
1477         struct kvm_s390_interrupt_info *inti = NULL;
1478         int isc;
1479
1480         for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
1481                 if (isc_mask & isc_to_isc_bits(isc))
1482                         inti = get_io_int(kvm, isc, schid);
1483         }
1484         return inti;
1485 }
1486
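/*
 * Return the highest priority ISC (i.e. the lowest ISC number) with an
 * adapter interrupt pending in the GISA and clear its IPM bit, or
 * -EINVAL if there is none. Adapter interrupts are not tied to a
 * subchannel, so a non-zero @schid never matches here.
 */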
1487 static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1488 {
1489         unsigned long active_mask;
1490         int isc;
1491
1492         if (schid)
1493                 goto out;
1494         if (!kvm->arch.gisa)
1495                 goto out;
1496
1497         active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
1498         while (active_mask) {
1499                 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
1500                 if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
1501                         return isc;
1502                 clear_bit_inv(isc, &active_mask);
1503         }
1504 out:
1505         return -EINVAL;
1506 }
1507
1508 /*
1509  * Dequeue and return an I/O interrupt matching any of the interruption
1510  * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
1511  * Take into account the interrupts pending in the interrupt list and in GISA.
1512  *
1513  * Note that for a guest that does not enable I/O interrupts
1514  * but relies on TPI, a flood of classic interrupts may starve
1515  * out adapter interrupts on the same isc. Linux does not do
1516  * that, and it is possible to work around the issue by configuring
1517  * different iscs for classic and adapter interrupts in the guest,
1518  * but we may want to revisit this in the future.
1519  */
1520 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
1521                                                     u64 isc_mask, u32 schid)
1522 {
1523         struct kvm_s390_interrupt_info *inti, *tmp_inti;
1524         int isc;
1525
1526         inti = get_top_io_int(kvm, isc_mask, schid);
1527
1528         isc = get_top_gisa_isc(kvm, isc_mask, schid);
1529         if (isc < 0)
1530                 /* no AI in GISA */
1531                 goto out;
1532
1533         if (!inti)
1534                 /* AI in GISA but no classical IO int */
1535                 goto gisa_out;
1536
1537         /* both types of interrupts present */
1538         if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1539                 /* classical IO int with higher priority */
1540                 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1541                 goto out;
1542         }
1543 gisa_out:
1544         tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1545         if (tmp_inti) {
1546                 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1547                 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1548                 if (inti)
1549                         kvm_s390_reinject_io_int(kvm, inti);
1550                 inti = tmp_inti;
1551         } else
1552                 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1553 out:
1554         return inti;
1555 }
1556
1557 #define SCCB_MASK 0xFFFFFFF8
1558 #define SCCB_EVENT_PENDING 0x3
1559
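/*
 * Merge a service interrupt into the single pending service signal:
 * event-pending bits accumulate, while a new SCCB address is dropped
 * as long as an earlier one has not been delivered. @inti is consumed
 * in any case.
 */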
1560 static int __inject_service(struct kvm *kvm,
1561                              struct kvm_s390_interrupt_info *inti)
1562 {
1563         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1564
1565         spin_lock(&fi->lock);
1566         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1567         /*
1568          * Early versions of the QEMU s390 bios will inject several
1569          * service interrupts one after another without handling the
1570          * condition code that indicates busy.
1571          * We silently ignore those superfluous SCCB values.
1572          * A future version of QEMU will take care of serializing
1573          * SERVC requests.
1574          */
1575         if (fi->srv_signal.ext_params & SCCB_MASK)
1576                 goto out;
1577         fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1578         set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1579 out:
1580         spin_unlock(&fi->lock);
1581         kfree(inti);
1582         return 0;
1583 }
1584
1585 static int __inject_virtio(struct kvm *kvm,
1586                             struct kvm_s390_interrupt_info *inti)
1587 {
1588         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1589
1590         spin_lock(&fi->lock);
1591         if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1592                 spin_unlock(&fi->lock);
1593                 return -EBUSY;
1594         }
1595         fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1596         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1597         set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1598         spin_unlock(&fi->lock);
1599         return 0;
1600 }
1601
1602 static int __inject_pfault_done(struct kvm *kvm,
1603                                  struct kvm_s390_interrupt_info *inti)
1604 {
1605         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1606
1607         spin_lock(&fi->lock);
1608         if (fi->counters[FIRQ_CNTR_PFAULT] >=
1609                 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1610                 spin_unlock(&fi->lock);
1611                 return -EBUSY;
1612         }
1613         fi->counters[FIRQ_CNTR_PFAULT] += 1;
1614         list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1615         set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1616         spin_unlock(&fi->lock);
1617         return 0;
1618 }
1619
1620 #define CR_PENDING_SUBCLASS 28
1621 static int __inject_float_mchk(struct kvm *kvm,
1622                                 struct kvm_s390_interrupt_info *inti)
1623 {
1624         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1625
1626         spin_lock(&fi->lock);
1627         fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1628         fi->mchk.mcic |= inti->mchk.mcic;
1629         set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1630         spin_unlock(&fi->lock);
1631         kfree(inti);
1632         return 0;
1633 }
1634
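/*
 * Queue a floating I/O interrupt. Adapter interrupts go directly into
 * the GISA IPM when a GISA is available; everything else is added to
 * the per-ISC list, bounded by KVM_S390_MAX_FLOAT_IRQS.
 */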
1635 static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1636 {
1637         struct kvm_s390_float_interrupt *fi;
1638         struct list_head *list;
1639         int isc;
1640
1641         isc = int_word_to_isc(inti->io.io_int_word);
1642
1643         if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
1644                 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
1645                 kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
1646                 kfree(inti);
1647                 return 0;
1648         }
1649
1650         fi = &kvm->arch.float_int;
1651         spin_lock(&fi->lock);
1652         if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1653                 spin_unlock(&fi->lock);
1654                 return -EBUSY;
1655         }
1656         fi->counters[FIRQ_CNTR_IO] += 1;
1657
1658         if (inti->type & KVM_S390_INT_IO_AI_MASK)
1659                 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1660         else
1661                 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1662                         inti->io.subchannel_id >> 8,
1663                         inti->io.subchannel_id >> 1 & 0x3,
1664                         inti->io.subchannel_nr);
1665         list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1666         list_add_tail(&inti->list, list);
1667         set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
1668         spin_unlock(&fi->lock);
1669         return 0;
1670 }
1671
1672 /*
1673  * Find a destination VCPU for a floating irq and kick it.
1674  */
1675 static void __floating_irq_kick(struct kvm *kvm, u64 type)
1676 {
1677         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1678         struct kvm_vcpu *dst_vcpu;
1679         int sigcpu, online_vcpus, nr_tries = 0;
1680
1681         online_vcpus = atomic_read(&kvm->online_vcpus);
1682         if (!online_vcpus)
1683                 return;
1684
1685         /* find idle VCPUs first, then round robin */
1686         sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
1687         if (sigcpu == online_vcpus) {
1688                 do {
1689                         sigcpu = fi->next_rr_cpu;
1690                         fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
1691                         /* avoid endless loops if all vcpus are stopped */
1692                         if (nr_tries++ >= online_vcpus)
1693                                 return;
1694                 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1695         }
1696         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1697
1698         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
1699         switch (type) {
1700         case KVM_S390_MCHK:
1701                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
1702                 break;
1703         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1704                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
1705                 break;
1706         default:
1707                 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
1708                 break;
1709         }
1710         kvm_s390_vcpu_wakeup(dst_vcpu);
1711 }
1712
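/*
 * Dispatch a floating interrupt to the type-specific injection helper
 * and, on success, kick a suitable destination VCPU. On failure the
 * caller remains responsible for freeing @inti.
 */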
1713 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1714 {
1715         u64 type = READ_ONCE(inti->type);
1716         int rc;
1717
1718         switch (type) {
1719         case KVM_S390_MCHK:
1720                 rc = __inject_float_mchk(kvm, inti);
1721                 break;
1722         case KVM_S390_INT_VIRTIO:
1723                 rc = __inject_virtio(kvm, inti);
1724                 break;
1725         case KVM_S390_INT_SERVICE:
1726                 rc = __inject_service(kvm, inti);
1727                 break;
1728         case KVM_S390_INT_PFAULT_DONE:
1729                 rc = __inject_pfault_done(kvm, inti);
1730                 break;
1731         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1732                 rc = __inject_io(kvm, inti);
1733                 break;
1734         default:
1735                 rc = -EINVAL;
1736         }
1737         if (rc)
1738                 return rc;
1739
1740         __floating_irq_kick(kvm, type);
1741         return 0;
1742 }
1743
1744 int kvm_s390_inject_vm(struct kvm *kvm,
1745                        struct kvm_s390_interrupt *s390int)
1746 {
1747         struct kvm_s390_interrupt_info *inti;
1748         int rc;
1749
1750         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1751         if (!inti)
1752                 return -ENOMEM;
1753
1754         inti->type = s390int->type;
1755         switch (inti->type) {
1756         case KVM_S390_INT_VIRTIO:
1757                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1758                          s390int->parm, s390int->parm64);
1759                 inti->ext.ext_params = s390int->parm;
1760                 inti->ext.ext_params2 = s390int->parm64;
1761                 break;
1762         case KVM_S390_INT_SERVICE:
1763                 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1764                 inti->ext.ext_params = s390int->parm;
1765                 break;
1766         case KVM_S390_INT_PFAULT_DONE:
1767                 inti->ext.ext_params2 = s390int->parm64;
1768                 break;
1769         case KVM_S390_MCHK:
1770                 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
1771                          s390int->parm64);
1772                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1773                 inti->mchk.mcic = s390int->parm64;
1774                 break;
1775         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1776                 inti->io.subchannel_id = s390int->parm >> 16;
1777                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1778                 inti->io.io_int_parm = s390int->parm64 >> 32;
1779                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1780                 break;
1781         default:
1782                 kfree(inti);
1783                 return -EINVAL;
1784         }
1785         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1786                                  2);
1787
1788         rc = __inject_vm(kvm, inti);
1789         if (rc)
1790                 kfree(inti);
1791         return rc;
1792 }
1793
1794 int kvm_s390_reinject_io_int(struct kvm *kvm,
1795                               struct kvm_s390_interrupt_info *inti)
1796 {
1797         return __inject_vm(kvm, inti);
1798 }
1799
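/*
 * Translate the legacy struct kvm_s390_interrupt representation into
 * struct kvm_s390_irq. CPU addresses and program interruption codes
 * must fit into 16 bits; requests with other parameter bits set are
 * rejected.
 */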
1800 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1801                        struct kvm_s390_irq *irq)
1802 {
1803         irq->type = s390int->type;
1804         switch (irq->type) {
1805         case KVM_S390_PROGRAM_INT:
1806                 if (s390int->parm & 0xffff0000)
1807                         return -EINVAL;
1808                 irq->u.pgm.code = s390int->parm;
1809                 break;
1810         case KVM_S390_SIGP_SET_PREFIX:
1811                 irq->u.prefix.address = s390int->parm;
1812                 break;
1813         case KVM_S390_SIGP_STOP:
1814                 irq->u.stop.flags = s390int->parm;
1815                 break;
1816         case KVM_S390_INT_EXTERNAL_CALL:
1817                 if (s390int->parm & 0xffff0000)
1818                         return -EINVAL;
1819                 irq->u.extcall.code = s390int->parm;
1820                 break;
1821         case KVM_S390_INT_EMERGENCY:
1822                 if (s390int->parm & 0xffff0000)
1823                         return -EINVAL;
1824                 irq->u.emerg.code = s390int->parm;
1825                 break;
1826         case KVM_S390_MCHK:
1827                 irq->u.mchk.mcic = s390int->parm64;
1828                 break;
1829         }
1830         return 0;
1831 }
1832
1833 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1834 {
1835         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1836
1837         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1838 }
1839
1840 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1841 {
1842         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1843
1844         spin_lock(&li->lock);
1845         li->irq.stop.flags = 0;
1846         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1847         spin_unlock(&li->lock);
1848 }
1849
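/*
 * Inject an interrupt into the local interrupt structures of @vcpu.
 * The caller must hold the vcpu's local interrupt lock. Interrupt
 * types that can only be injected as floating interrupts are rejected
 * with -EINVAL.
 */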
1850 static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1851 {
1852         int rc;
1853
1854         switch (irq->type) {
1855         case KVM_S390_PROGRAM_INT:
1856                 rc = __inject_prog(vcpu, irq);
1857                 break;
1858         case KVM_S390_SIGP_SET_PREFIX:
1859                 rc = __inject_set_prefix(vcpu, irq);
1860                 break;
1861         case KVM_S390_SIGP_STOP:
1862                 rc = __inject_sigp_stop(vcpu, irq);
1863                 break;
1864         case KVM_S390_RESTART:
1865                 rc = __inject_sigp_restart(vcpu, irq);
1866                 break;
1867         case KVM_S390_INT_CLOCK_COMP:
1868                 rc = __inject_ckc(vcpu);
1869                 break;
1870         case KVM_S390_INT_CPU_TIMER:
1871                 rc = __inject_cpu_timer(vcpu);
1872                 break;
1873         case KVM_S390_INT_EXTERNAL_CALL:
1874                 rc = __inject_extcall(vcpu, irq);
1875                 break;
1876         case KVM_S390_INT_EMERGENCY:
1877                 rc = __inject_sigp_emergency(vcpu, irq);
1878                 break;
1879         case KVM_S390_MCHK:
1880                 rc = __inject_mchk(vcpu, irq);
1881                 break;
1882         case KVM_S390_INT_PFAULT_INIT:
1883                 rc = __inject_pfault_init(vcpu, irq);
1884                 break;
1885         case KVM_S390_INT_VIRTIO:
1886         case KVM_S390_INT_SERVICE:
1887         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1888         default:
1889                 rc = -EINVAL;
1890         }
1891
1892         return rc;
1893 }
1894
1895 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1896 {
1897         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1898         int rc;
1899
1900         spin_lock(&li->lock);
1901         rc = do_inject_vcpu(vcpu, irq);
1902         spin_unlock(&li->lock);
1903         if (!rc)
1904                 kvm_s390_vcpu_wakeup(vcpu);
1905         return rc;
1906 }
1907
1908 static inline void clear_irq_list(struct list_head *_list)
1909 {
1910         struct kvm_s390_interrupt_info *inti, *n;
1911
1912         list_for_each_entry_safe(inti, n, _list, list) {
1913                 list_del(&inti->list);
1914                 kfree(inti);
1915         }
1916 }
1917
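/*
 * Convert a queued floating interrupt into the struct kvm_s390_irq
 * format reported to userspace by KVM_DEV_FLIC_GET_ALL_IRQS.
 */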
1918 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1919                        struct kvm_s390_irq *irq)
1920 {
1921         irq->type = inti->type;
1922         switch (inti->type) {
1923         case KVM_S390_INT_PFAULT_INIT:
1924         case KVM_S390_INT_PFAULT_DONE:
1925         case KVM_S390_INT_VIRTIO:
1926                 irq->u.ext = inti->ext;
1927                 break;
1928         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1929                 irq->u.io = inti->io;
1930                 break;
1931         }
1932 }
1933
1934 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1935 {
1936         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1937         int i;
1938
1939         spin_lock(&fi->lock);
1940         fi->pending_irqs = 0;
1941         memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1942         memset(&fi->mchk, 0, sizeof(fi->mchk));
1943         for (i = 0; i < FIRQ_LIST_COUNT; i++)
1944                 clear_irq_list(&fi->lists[i]);
1945         for (i = 0; i < FIRQ_MAX_COUNT; i++)
1946                 fi->counters[i] = 0;
1947         spin_unlock(&fi->lock);
1948         kvm_s390_gisa_clear(kvm);
1949 }
1950
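/*
 * Copy all pending floating interrupts into @usrbuf: adapter interrupts
 * taken from the GISA (clearing the corresponding IPM bits) first, then
 * the interrupt lists, the service signal and a repressible machine
 * check. Returns the number of interrupts copied, -ENOMEM if the buffer
 * is too small (userspace may retry with a bigger one) or another
 * negative error code.
 */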
1951 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
1952 {
1953         struct kvm_s390_interrupt_info *inti;
1954         struct kvm_s390_float_interrupt *fi;
1955         struct kvm_s390_irq *buf;
1956         struct kvm_s390_irq *irq;
1957         int max_irqs;
1958         int ret = 0;
1959         int n = 0;
1960         int i;
1961
1962         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1963                 return -EINVAL;
1964
1965         /*
1966          * We are already using -ENOMEM to signal
1967          * userspace that it may retry with a bigger buffer,
1968          * so we need to use something else for this case.
1969          */
1970         buf = vzalloc(len);
1971         if (!buf)
1972                 return -ENOBUFS;
1973
1974         max_irqs = len / sizeof(struct kvm_s390_irq);
1975
1976         if (kvm->arch.gisa &&
1977             kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
1978                 for (i = 0; i <= MAX_ISC; i++) {
1979                         if (n == max_irqs) {
1980                                 /* signal userspace to try again */
1981                                 ret = -ENOMEM;
1982                                 goto out_nolock;
1983                         }
1984                         if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
1985                                 irq = (struct kvm_s390_irq *) &buf[n];
1986                                 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
1987                                 irq->u.io.io_int_word = isc_to_int_word(i);
1988                                 n++;
1989                         }
1990                 }
1991         }
1992         fi = &kvm->arch.float_int;
1993         spin_lock(&fi->lock);
1994         for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1995                 list_for_each_entry(inti, &fi->lists[i], list) {
1996                         if (n == max_irqs) {
1997                                 /* signal userspace to try again */
1998                                 ret = -ENOMEM;
1999                                 goto out;
2000                         }
2001                         inti_to_irq(inti, &buf[n]);
2002                         n++;
2003                 }
2004         }
2005         if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
2006                 if (n == max_irqs) {
2007                         /* signal userspace to try again */
2008                         ret = -ENOMEM;
2009                         goto out;
2010                 }
2011                 irq = (struct kvm_s390_irq *) &buf[n];
2012                 irq->type = KVM_S390_INT_SERVICE;
2013                 irq->u.ext = fi->srv_signal;
2014                 n++;
2015         }
2016         if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2017                 if (n == max_irqs) {
2018                         /* signal userspace to try again */
2019                         ret = -ENOMEM;
2020                         goto out;
2021                 }
2022                 irq = (struct kvm_s390_irq *) &buf[n];
2023                 irq->type = KVM_S390_MCHK;
2024                 irq->u.mchk = fi->mchk;
2025                 n++;
2026         }
2027
2028 out:
2029         spin_unlock(&fi->lock);
2030 out_nolock:
2031         if (!ret && n > 0) {
2032                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2033                         ret = -EFAULT;
2034         }
2035         vfree(buf);
2036
2037         return ret < 0 ? ret : n;
2038 }
2039
2040 static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2041 {
2042         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2043         struct kvm_s390_ais_all ais;
2044
2045         if (attr->attr < sizeof(ais))
2046                 return -EINVAL;
2047
2048         if (!test_kvm_facility(kvm, 72))
2049                 return -ENOTSUPP;
2050
2051         mutex_lock(&fi->ais_lock);
2052         ais.simm = fi->simm;
2053         ais.nimm = fi->nimm;
2054         mutex_unlock(&fi->ais_lock);
2055
2056         if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2057                 return -EFAULT;
2058
2059         return 0;
2060 }
2061
2062 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2063 {
2064         int r;
2065
2066         switch (attr->group) {
2067         case KVM_DEV_FLIC_GET_ALL_IRQS:
2068                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2069                                           attr->attr);
2070                 break;
2071         case KVM_DEV_FLIC_AISM_ALL:
2072                 r = flic_ais_mode_get_all(dev->kvm, attr);
2073                 break;
2074         default:
2075                 r = -EINVAL;
2076         }
2077
2078         return r;
2079 }
2080
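/*
 * Fetch one struct kvm_s390_irq from userspace and copy the payload
 * that matches its type into @inti; unknown types are rejected.
 */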
2081 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2082                                      u64 addr)
2083 {
2084         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2085         void *target = NULL;
2086         void __user *source;
2087         u64 size;
2088
2089         if (get_user(inti->type, (u64 __user *)addr))
2090                 return -EFAULT;
2091
2092         switch (inti->type) {
2093         case KVM_S390_INT_PFAULT_INIT:
2094         case KVM_S390_INT_PFAULT_DONE:
2095         case KVM_S390_INT_VIRTIO:
2096         case KVM_S390_INT_SERVICE:
2097                 target = (void *) &inti->ext;
2098                 source = &uptr->u.ext;
2099                 size = sizeof(inti->ext);
2100                 break;
2101         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2102                 target = (void *) &inti->io;
2103                 source = &uptr->u.io;
2104                 size = sizeof(inti->io);
2105                 break;
2106         case KVM_S390_MCHK:
2107                 target = (void *) &inti->mchk;
2108                 source = &uptr->u.mchk;
2109                 size = sizeof(inti->mchk);
2110                 break;
2111         default:
2112                 return -EINVAL;
2113         }
2114
2115         if (copy_from_user(target, source, size))
2116                 return -EFAULT;
2117
2118         return 0;
2119 }
2120
2121 static int enqueue_floating_irq(struct kvm_device *dev,
2122                                 struct kvm_device_attr *attr)
2123 {
2124         struct kvm_s390_interrupt_info *inti = NULL;
2125         int r = 0;
2126         int len = attr->attr;
2127
2128         if (len % sizeof(struct kvm_s390_irq) != 0)
2129                 return -EINVAL;
2130         if (len > KVM_S390_FLIC_MAX_BUFFER)
2131                 return -EINVAL;
2132
2133         while (len >= sizeof(struct kvm_s390_irq)) {
2134                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2135                 if (!inti)
2136                         return -ENOMEM;
2137
2138                 r = copy_irq_from_user(inti, attr->addr);
2139                 if (r) {
2140                         kfree(inti);
2141                         return r;
2142                 }
2143                 r = __inject_vm(dev->kvm, inti);
2144                 if (r) {
2145                         kfree(inti);
2146                         return r;
2147                 }
2148                 len -= sizeof(struct kvm_s390_irq);
2149                 attr->addr += sizeof(struct kvm_s390_irq);
2150         }
2151
2152         return r;
2153 }
2154
2155 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2156 {
2157         if (id >= MAX_S390_IO_ADAPTERS)
2158                 return NULL;
2159         return kvm->arch.adapters[id];
2160 }
2161
2162 static int register_io_adapter(struct kvm_device *dev,
2163                                struct kvm_device_attr *attr)
2164 {
2165         struct s390_io_adapter *adapter;
2166         struct kvm_s390_io_adapter adapter_info;
2167
2168         if (copy_from_user(&adapter_info,
2169                            (void __user *)attr->addr, sizeof(adapter_info)))
2170                 return -EFAULT;
2171
2172         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
2173             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
2174                 return -EINVAL;
2175
2176         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2177         if (!adapter)
2178                 return -ENOMEM;
2179
2180         INIT_LIST_HEAD(&adapter->maps);
2181         init_rwsem(&adapter->maps_lock);
2182         atomic_set(&adapter->nr_maps, 0);
2183         adapter->id = adapter_info.id;
2184         adapter->isc = adapter_info.isc;
2185         adapter->maskable = adapter_info.maskable;
2186         adapter->masked = false;
2187         adapter->swap = adapter_info.swap;
2188         adapter->suppressible = (adapter_info.flags) &
2189                                 KVM_S390_ADAPTER_SUPPRESSIBLE;
2190         dev->kvm->arch.adapters[adapter->id] = adapter;
2191
2192         return 0;
2193 }
2194
2195 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2196 {
2197         int ret;
2198         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2199
2200         if (!adapter || !adapter->maskable)
2201                 return -EINVAL;
2202         ret = adapter->masked;
2203         adapter->masked = masked;
2204         return ret;
2205 }
2206
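/*
 * Pin the guest page backing the adapter indicator area at guest
 * address @addr and remember the mapping, so that indicator bits can
 * be set from interrupt injection context. The number of mappings per
 * adapter is bounded by MAX_S390_ADAPTER_MAPS.
 */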
2207 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2208 {
2209         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2210         struct s390_map_info *map;
2211         int ret;
2212
2213         if (!adapter || !addr)
2214                 return -EINVAL;
2215
2216         map = kzalloc(sizeof(*map), GFP_KERNEL);
2217         if (!map) {
2218                 ret = -ENOMEM;
2219                 goto out;
2220         }
2221         INIT_LIST_HEAD(&map->list);
2222         map->guest_addr = addr;
2223         map->addr = gmap_translate(kvm->arch.gmap, addr);
2224         if (map->addr == -EFAULT) {
2225                 ret = -EFAULT;
2226                 goto out;
2227         }
2228         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2229         if (ret < 0)
2230                 goto out;
2231         BUG_ON(ret != 1);
2232         down_write(&adapter->maps_lock);
2233         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2234                 list_add_tail(&map->list, &adapter->maps);
2235                 ret = 0;
2236         } else {
2237                 put_page(map->page);
2238                 ret = -EINVAL;
2239         }
2240         up_write(&adapter->maps_lock);
2241 out:
2242         if (ret)
2243                 kfree(map);
2244         return ret;
2245 }
2246
2247 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2248 {
2249         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2250         struct s390_map_info *map, *tmp;
2251         int found = 0;
2252
2253         if (!adapter || !addr)
2254                 return -EINVAL;
2255
2256         down_write(&adapter->maps_lock);
2257         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2258                 if (map->guest_addr == addr) {
2259                         found = 1;
2260                         atomic_dec(&adapter->nr_maps);
2261                         list_del(&map->list);
2262                         put_page(map->page);
2263                         kfree(map);
2264                         break;
2265                 }
2266         }
2267         up_write(&adapter->maps_lock);
2268
2269         return found ? 0 : -EINVAL;
2270 }
2271
2272 void kvm_s390_destroy_adapters(struct kvm *kvm)
2273 {
2274         int i;
2275         struct s390_map_info *map, *tmp;
2276
2277         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2278                 if (!kvm->arch.adapters[i])
2279                         continue;
2280                 list_for_each_entry_safe(map, tmp,
2281                                          &kvm->arch.adapters[i]->maps, list) {
2282                         list_del(&map->list);
2283                         put_page(map->page);
2284                         kfree(map);
2285                 }
2286                 kfree(kvm->arch.adapters[i]);
2287         }
2288 }
2289
2290 static int modify_io_adapter(struct kvm_device *dev,
2291                              struct kvm_device_attr *attr)
2292 {
2293         struct kvm_s390_io_adapter_req req;
2294         struct s390_io_adapter *adapter;
2295         int ret;
2296
2297         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2298                 return -EFAULT;
2299
2300         adapter = get_io_adapter(dev->kvm, req.id);
2301         if (!adapter)
2302                 return -EINVAL;
2303         switch (req.type) {
2304         case KVM_S390_IO_ADAPTER_MASK:
2305                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2306                 if (ret > 0)
2307                         ret = 0;
2308                 break;
2309         case KVM_S390_IO_ADAPTER_MAP:
2310                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2311                 break;
2312         case KVM_S390_IO_ADAPTER_UNMAP:
2313                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2314                 break;
2315         default:
2316                 ret = -EINVAL;
2317         }
2318
2319         return ret;
2320 }
2321
2322 static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2324 {
2325         const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2326         u32 schid;
2327
2328         if (attr->flags)
2329                 return -EINVAL;
2330         if (attr->attr != sizeof(schid))
2331                 return -EINVAL;
2332         if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2333                 return -EFAULT;
2334         if (!schid)
2335                 return -EINVAL;
2336         kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2337         /*
2338          * If userspace is conforming to the architecture, we can have at most
2339          * one pending I/O interrupt per subchannel, so this is effectively a
2340          * clear all.
2341          */
2342         return 0;
2343 }
2344
2345 static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2346 {
2347         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2348         struct kvm_s390_ais_req req;
2349         int ret = 0;
2350
2351         if (!test_kvm_facility(kvm, 72))
2352                 return -ENOTSUPP;
2353
2354         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2355                 return -EFAULT;
2356
2357         if (req.isc > MAX_ISC)
2358                 return -EINVAL;
2359
2360         trace_kvm_s390_modify_ais_mode(req.isc,
2361                                        (fi->simm & AIS_MODE_MASK(req.isc)) ?
2362                                        (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2363                                        2 : KVM_S390_AIS_MODE_SINGLE :
2364                                        KVM_S390_AIS_MODE_ALL, req.mode);
2365
2366         mutex_lock(&fi->ais_lock);
2367         switch (req.mode) {
2368         case KVM_S390_AIS_MODE_ALL:
2369                 fi->simm &= ~AIS_MODE_MASK(req.isc);
2370                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2371                 break;
2372         case KVM_S390_AIS_MODE_SINGLE:
2373                 fi->simm |= AIS_MODE_MASK(req.isc);
2374                 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2375                 break;
2376         default:
2377                 ret = -EINVAL;
2378         }
2379         mutex_unlock(&fi->ais_lock);
2380
2381         return ret;
2382 }
2383
2384 static int kvm_s390_inject_airq(struct kvm *kvm,
2385                                 struct s390_io_adapter *adapter)
2386 {
2387         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2388         struct kvm_s390_interrupt s390int = {
2389                 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2390                 .parm = 0,
2391                 .parm64 = isc_to_int_word(adapter->isc),
2392         };
2393         int ret = 0;
2394
2395         if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
2396                 return kvm_s390_inject_vm(kvm, &s390int);
2397
2398         mutex_lock(&fi->ais_lock);
2399         if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2400                 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2401                 goto out;
2402         }
2403
2404         ret = kvm_s390_inject_vm(kvm, &s390int);
2405         if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2406                 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2407                 trace_kvm_s390_modify_ais_mode(adapter->isc,
2408                                                KVM_S390_AIS_MODE_SINGLE, 2);
2409         }
2410 out:
2411         mutex_unlock(&fi->ais_lock);
2412         return ret;
2413 }
2414
2415 static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2416 {
2417         unsigned int id = attr->attr;
2418         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2419
2420         if (!adapter)
2421                 return -EINVAL;
2422
2423         return kvm_s390_inject_airq(kvm, adapter);
2424 }
2425
2426 static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2427 {
2428         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2429         struct kvm_s390_ais_all ais;
2430
2431         if (!test_kvm_facility(kvm, 72))
2432                 return -ENOTSUPP;
2433
2434         if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2435                 return -EFAULT;
2436
2437         mutex_lock(&fi->ais_lock);
2438         fi->simm = ais.simm;
2439         fi->nimm = ais.nimm;
2440         mutex_unlock(&fi->ais_lock);
2441
2442         return 0;
2443 }
2444
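/*
 * The attribute groups below back KVM_SET_DEVICE_ATTR on the FLIC
 * device fd. A minimal userspace sketch for enqueueing interrupts,
 * with flic_fd and irqs as placeholders:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr  = n * sizeof(struct kvm_s390_irq),
 *		.addr  = (__u64)(unsigned long)irqs,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */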
2445 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2446 {
2447         int r = 0;
2448         unsigned int i;
2449         struct kvm_vcpu *vcpu;
2450
2451         switch (attr->group) {
2452         case KVM_DEV_FLIC_ENQUEUE:
2453                 r = enqueue_floating_irq(dev, attr);
2454                 break;
2455         case KVM_DEV_FLIC_CLEAR_IRQS:
2456                 kvm_s390_clear_float_irqs(dev->kvm);
2457                 break;
2458         case KVM_DEV_FLIC_APF_ENABLE:
2459                 dev->kvm->arch.gmap->pfault_enabled = 1;
2460                 break;
2461         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2462                 dev->kvm->arch.gmap->pfault_enabled = 0;
2463                 /*
2464                  * Make sure no async faults are in transition when
2465                  * clearing the queues, so that we don't need to worry
2466                  * about late-arriving workers.
2467                  */
2468                 synchronize_srcu(&dev->kvm->srcu);
2469                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2470                         kvm_clear_async_pf_completion_queue(vcpu);
2471                 break;
2472         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2473                 r = register_io_adapter(dev, attr);
2474                 break;
2475         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2476                 r = modify_io_adapter(dev, attr);
2477                 break;
2478         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2479                 r = clear_io_irq(dev->kvm, attr);
2480                 break;
2481         case KVM_DEV_FLIC_AISM:
2482                 r = modify_ais_mode(dev->kvm, attr);
2483                 break;
2484         case KVM_DEV_FLIC_AIRQ_INJECT:
2485                 r = flic_inject_airq(dev->kvm, attr);
2486                 break;
2487         case KVM_DEV_FLIC_AISM_ALL:
2488                 r = flic_ais_mode_set_all(dev->kvm, attr);
2489                 break;
2490         default:
2491                 r = -EINVAL;
2492         }
2493
2494         return r;
2495 }
2496
2497 static int flic_has_attr(struct kvm_device *dev,
2498                              struct kvm_device_attr *attr)
2499 {
2500         switch (attr->group) {
2501         case KVM_DEV_FLIC_GET_ALL_IRQS:
2502         case KVM_DEV_FLIC_ENQUEUE:
2503         case KVM_DEV_FLIC_CLEAR_IRQS:
2504         case KVM_DEV_FLIC_APF_ENABLE:
2505         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2506         case KVM_DEV_FLIC_ADAPTER_REGISTER:
2507         case KVM_DEV_FLIC_ADAPTER_MODIFY:
2508         case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2509         case KVM_DEV_FLIC_AISM:
2510         case KVM_DEV_FLIC_AIRQ_INJECT:
2511         case KVM_DEV_FLIC_AISM_ALL:
2512                 return 0;
2513         }
2514         return -ENXIO;
2515 }
2516
2517 static int flic_create(struct kvm_device *dev, u32 type)
2518 {
2519         if (!dev)
2520                 return -EINVAL;
2521         if (dev->kvm->arch.flic)
2522                 return -EINVAL;
2523         dev->kvm->arch.flic = dev;
2524         return 0;
2525 }
2526
2527 static void flic_destroy(struct kvm_device *dev)
2528 {
2529         dev->kvm->arch.flic = NULL;
2530         kfree(dev);
2531 }
2532
2533 /* s390 floating irq controller (flic) */
2534 struct kvm_device_ops kvm_flic_ops = {
2535         .name = "kvm-flic",
2536         .get_attr = flic_get_attr,
2537         .set_attr = flic_set_attr,
2538         .has_attr = flic_has_attr,
2539         .create = flic_create,
2540         .destroy = flic_destroy,
2541 };
2542
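/*
 * Compute the number of an indicator bit within its backing page. If
 * @swap is set, the numbering is inverted within each 64-bit word to
 * match the bit order the guest uses.
 */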
2543 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2544 {
2545         unsigned long bit;
2546
2547         bit = bit_nr + (addr % PAGE_SIZE) * 8;
2548
2549         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2550 }
2551
2552 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2553                                           u64 addr)
2554 {
2555         struct s390_map_info *map;
2556
2557         if (!adapter)
2558                 return NULL;
2559
2560         list_for_each_entry(map, &adapter->maps, list) {
2561                 if (map->guest_addr == addr)
2562                         return map;
2563         }
2564         return NULL;
2565 }
2566
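/*
 * Set the adapter-local indicator bit and the summary indicator bit in
 * the pinned guest pages. Returns 1 if the summary bit was newly set
 * (an interrupt needs to be injected), 0 if it was already set (the
 * interrupt is coalesced) and -1 if a required mapping is missing.
 */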
2567 static int adapter_indicators_set(struct kvm *kvm,
2568                                   struct s390_io_adapter *adapter,
2569                                   struct kvm_s390_adapter_int *adapter_int)
2570 {
2571         unsigned long bit;
2572         int summary_set, idx;
2573         struct s390_map_info *info;
2574         void *map;
2575
2576         info = get_map_info(adapter, adapter_int->ind_addr);
2577         if (!info)
2578                 return -1;
2579         map = page_address(info->page);
2580         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2581         set_bit(bit, map);
2582         idx = srcu_read_lock(&kvm->srcu);
2583         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2584         set_page_dirty_lock(info->page);
2585         info = get_map_info(adapter, adapter_int->summary_addr);
2586         if (!info) {
2587                 srcu_read_unlock(&kvm->srcu, idx);
2588                 return -1;
2589         }
2590         map = page_address(info->page);
2591         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2592                           adapter->swap);
2593         summary_set = test_and_set_bit(bit, map);
2594         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2595         set_page_dirty_lock(info->page);
2596         srcu_read_unlock(&kvm->srcu, idx);
2597         return summary_set ? 0 : 1;
2598 }
2599
2600 /*
2601  * < 0 - not injected due to error
2602  * = 0 - coalesced, summary indicator already active
2603  * > 0 - injected interrupt
2604  */
2605 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2606                            struct kvm *kvm, int irq_source_id, int level,
2607                            bool line_status)
2608 {
2609         int ret;
2610         struct s390_io_adapter *adapter;
2611
2612         /* We're only interested in the 0->1 transition. */
2613         if (!level)
2614                 return 0;
2615         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2616         if (!adapter)
2617                 return -1;
2618         down_read(&adapter->maps_lock);
2619         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2620         up_read(&adapter->maps_lock);
2621         if ((ret > 0) && !adapter->masked) {
2622                 ret = kvm_s390_inject_airq(kvm, adapter);
2623                 if (ret == 0)
2624                         ret = 1;
2625         }
2626         return ret;
2627 }
2628
2629 /*
2630  * Inject the machine check into the guest.
2631  */
2632 void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2633                                      struct mcck_volatile_info *mcck_info)
2634 {
2635         struct kvm_s390_interrupt_info inti;
2636         struct kvm_s390_irq irq;
2637         struct kvm_s390_mchk_info *mchk;
2638         union mci mci;
2639         __u64 cr14 = 0;         /* upper bits are not used */
2640         int rc;
2641
2642         mci.val = mcck_info->mcic;
2643         if (mci.sr)
2644                 cr14 |= CR14_RECOVERY_SUBMASK;
2645         if (mci.dg)
2646                 cr14 |= CR14_DEGRADATION_SUBMASK;
2647         if (mci.w)
2648                 cr14 |= CR14_WARNING_SUBMASK;
2649
2650         mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2651         mchk->cr14 = cr14;
2652         mchk->mcic = mcck_info->mcic;
2653         mchk->ext_damage_code = mcck_info->ext_damage_code;
2654         mchk->failing_storage_address = mcck_info->failing_storage_address;
2655         if (mci.ck) {
2656                 /* Inject the floating machine check */
2657                 inti.type = KVM_S390_MCHK;
2658                 rc = __inject_vm(vcpu->kvm, &inti);
2659         } else {
2660                 /* Inject the machine check into the specified vcpu */
2661                 irq.type = KVM_S390_MCHK;
2662                 rc = kvm_s390_inject_vcpu(vcpu, &irq);
2663         }
2664         WARN_ON_ONCE(rc);
2665 }
2666
2667 int kvm_set_routing_entry(struct kvm *kvm,
2668                           struct kvm_kernel_irq_routing_entry *e,
2669                           const struct kvm_irq_routing_entry *ue)
2670 {
2671         int ret;
2672
2673         switch (ue->type) {
2674         case KVM_IRQ_ROUTING_S390_ADAPTER:
2675                 e->set = set_adapter_int;
2676                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2677                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2678                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2679                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2680                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2681                 ret = 0;
2682                 break;
2683         default:
2684                 ret = -EINVAL;
2685         }
2686
2687         return ret;
2688 }
2689
2690 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2691                 int irq_source_id, int level, bool line_status)
2692 {
2693         return -EINVAL;
2694 }
2695
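/*
 * Backs the KVM_S390_SET_IRQ_STATE vcpu ioctl: replay a saved set of
 * local interrupts, which is only allowed while no interrupts are
 * pending on this vcpu.
 */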
2696 int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2697 {
2698         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2699         struct kvm_s390_irq *buf;
2700         int r = 0;
2701         int n;
2702
2703         buf = vmalloc(len);
2704         if (!buf)
2705                 return -ENOMEM;
2706
2707         if (copy_from_user((void *) buf, irqstate, len)) {
2708                 r = -EFAULT;
2709                 goto out_free;
2710         }
2711
2712         /*
2713          * Don't allow setting the interrupt state
2714          * when there are already interrupts pending.
2715          */
2716         spin_lock(&li->lock);
2717         if (li->pending_irqs) {
2718                 r = -EBUSY;
2719                 goto out_unlock;
2720         }
2721
2722         for (n = 0; n < len / sizeof(*buf); n++) {
2723                 r = do_inject_vcpu(vcpu, &buf[n]);
2724                 if (r)
2725                         break;
2726         }
2727
2728 out_unlock:
2729         spin_unlock(&li->lock);
2730 out_free:
2731         vfree(buf);
2732
2733         return r;
2734 }
2735
2736 static void store_local_irq(struct kvm_s390_local_interrupt *li,
2737                             struct kvm_s390_irq *irq,
2738                             unsigned long irq_type)
2739 {
2740         switch (irq_type) {
2741         case IRQ_PEND_MCHK_EX:
2742         case IRQ_PEND_MCHK_REP:
2743                 irq->type = KVM_S390_MCHK;
2744                 irq->u.mchk = li->irq.mchk;
2745                 break;
2746         case IRQ_PEND_PROG:
2747                 irq->type = KVM_S390_PROGRAM_INT;
2748                 irq->u.pgm = li->irq.pgm;
2749                 break;
2750         case IRQ_PEND_PFAULT_INIT:
2751                 irq->type = KVM_S390_INT_PFAULT_INIT;
2752                 irq->u.ext = li->irq.ext;
2753                 break;
2754         case IRQ_PEND_EXT_EXTERNAL:
2755                 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2756                 irq->u.extcall = li->irq.extcall;
2757                 break;
2758         case IRQ_PEND_EXT_CLOCK_COMP:
2759                 irq->type = KVM_S390_INT_CLOCK_COMP;
2760                 break;
2761         case IRQ_PEND_EXT_CPU_TIMER:
2762                 irq->type = KVM_S390_INT_CPU_TIMER;
2763                 break;
2764         case IRQ_PEND_SIGP_STOP:
2765                 irq->type = KVM_S390_SIGP_STOP;
2766                 irq->u.stop = li->irq.stop;
2767                 break;
2768         case IRQ_PEND_RESTART:
2769                 irq->type = KVM_S390_RESTART;
2770                 break;
2771         case IRQ_PEND_SET_PREFIX:
2772                 irq->type = KVM_S390_SIGP_SET_PREFIX;
2773                 irq->u.prefix = li->irq.prefix;
2774                 break;
2775         }
2776 }
2777
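/*
 * Backs the KVM_S390_GET_IRQ_STATE vcpu ioctl: store the pending local
 * interrupts into @buf. Emergency signals are expanded to one entry per
 * signalling CPU address, and an external call handled via SIGP
 * interpretation is read back from the SCA. Returns the number of bytes
 * written, -ENOBUFS if @len is too small, or -EFAULT on copy failure.
 */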
2778 int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2779 {
2780         int scn;
2781         unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2782         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2783         unsigned long pending_irqs;
2784         struct kvm_s390_irq irq;
2785         unsigned long irq_type;
2786         int cpuaddr;
2787         int n = 0;
2788
2789         spin_lock(&li->lock);
2790         pending_irqs = li->pending_irqs;
2791         memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2792                sizeof(sigp_emerg_pending));
2793         spin_unlock(&li->lock);
2794
2795         for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2796                 memset(&irq, 0, sizeof(irq));
2797                 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2798                         continue;
2799                 if (n + sizeof(irq) > len)
2800                         return -ENOBUFS;
2801                 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2802                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2803                         return -EFAULT;
2804                 n += sizeof(irq);
2805         }
2806
2807         if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2808                 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2809                         memset(&irq, 0, sizeof(irq));
2810                         if (n + sizeof(irq) > len)
2811                                 return -ENOBUFS;
2812                         irq.type = KVM_S390_INT_EMERGENCY;
2813                         irq.u.emerg.code = cpuaddr;
2814                         if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2815                                 return -EFAULT;
2816                         n += sizeof(irq);
2817                 }
2818         }
2819
2820         if (sca_ext_call_pending(vcpu, &scn)) {
2821                 if (n + sizeof(irq) > len)
2822                         return -ENOBUFS;
2823                 memset(&irq, 0, sizeof(irq));
2824                 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2825                 irq.u.extcall.code = scn;
2826                 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2827                         return -EFAULT;
2828                 n += sizeof(irq);
2829         }
2830
2831         return n;
2832 }
2833
2834 void kvm_s390_gisa_clear(struct kvm *kvm)
2835 {
2836         if (kvm->arch.gisa) {
2837                 memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
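                /* a GISA that is not on any alert list points back at itself */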
2838                 kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
2839                 VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
2840         }
2841 }
2842
2843 void kvm_s390_gisa_init(struct kvm *kvm)
2844 {
2845         if (css_general_characteristics.aiv) {
2846                 kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
2847                 VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
2848                 kvm_s390_gisa_clear(kvm);
2849         }
2850 }
2851
2852 void kvm_s390_gisa_destroy(struct kvm *kvm)
2853 {
2854         if (!kvm->arch.gisa)
2855                 return;
2856         kvm->arch.gisa = NULL;
2857 }