KVM: s390: adapter interrupt sources
[linux-2.6-block.git] / arch / s390 / kvm / interrupt.c
1 /*
2  * handling kvm guest interrupts
3  *
4  * Copyright IBM Corp. 2008,2014
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  */
12
13 #include <linux/interrupt.h>
14 #include <linux/kvm_host.h>
15 #include <linux/hrtimer.h>
16 #include <linux/signal.h>
17 #include <linux/slab.h>
18 #include <asm/asm-offsets.h>
19 #include <asm/uaccess.h>
20 #include "kvm-s390.h"
21 #include "gaccess.h"
22 #include "trace-s390.h"
23
24 #define IOINT_SCHID_MASK 0x0000ffff
25 #define IOINT_SSID_MASK 0x00030000
26 #define IOINT_CSSID_MASK 0x03fc0000
27 #define IOINT_AI_MASK 0x04000000
28
29 static int is_ioint(u64 type)
30 {
31         return ((type & 0xfffe0000u) != 0xfffe0000u);
32 }
33
34 int psw_extint_disabled(struct kvm_vcpu *vcpu)
35 {
36         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
37 }
38
39 static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
40 {
41         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
42 }
43
44 static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
45 {
46         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
47 }
48
49 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
50 {
51         if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
52             (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
53             (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
54                 return 0;
55         return 1;
56 }
57
58 static u64 int_word_to_isc_bits(u32 int_word)
59 {
60         u8 isc = (int_word & 0x38000000) >> 27;
61
62         return (0x80 >> isc) << 24;
63 }
64
65 static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
66                                       struct kvm_s390_interrupt_info *inti)
67 {
68         switch (inti->type) {
69         case KVM_S390_INT_EXTERNAL_CALL:
70                 if (psw_extint_disabled(vcpu))
71                         return 0;
72                 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
73                         return 1;
74         case KVM_S390_INT_EMERGENCY:
75                 if (psw_extint_disabled(vcpu))
76                         return 0;
77                 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
78                         return 1;
79                 return 0;
80         case KVM_S390_INT_SERVICE:
81         case KVM_S390_INT_PFAULT_INIT:
82         case KVM_S390_INT_PFAULT_DONE:
83         case KVM_S390_INT_VIRTIO:
84                 if (psw_extint_disabled(vcpu))
85                         return 0;
86                 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
87                         return 1;
88                 return 0;
89         case KVM_S390_PROGRAM_INT:
90         case KVM_S390_SIGP_STOP:
91         case KVM_S390_SIGP_SET_PREFIX:
92         case KVM_S390_RESTART:
93                 return 1;
94         case KVM_S390_MCHK:
95                 if (psw_mchk_disabled(vcpu))
96                         return 0;
97                 if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
98                         return 1;
99                 return 0;
100         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
101                 if (psw_ioint_disabled(vcpu))
102                         return 0;
103                 if (vcpu->arch.sie_block->gcr[6] &
104                     int_word_to_isc_bits(inti->io.io_int_word))
105                         return 1;
106                 return 0;
107         default:
108                 printk(KERN_WARNING "illegal interrupt type %llx\n",
109                        inti->type);
110                 BUG();
111         }
112         return 0;
113 }
114
115 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
116 {
117         atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
118         set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
119 }
120
121 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
122 {
123         atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
124         clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
125 }
126
/*
 * Drop all previously requested intercept indicators: clear the
 * interrupt-pending cpuflags, stop intercepting control register
 * loads (lctl) and stop intercepting LPSW.  Re-armed as needed by
 * __set_intercept_indicator() on the next delivery pass.
 */
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
}
135
/* Atomically set @flag in the vcpu's SIE control block cpuflags. */
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
140
/*
 * Arrange for a SIE exit as soon as the guest could take the (not
 * currently deliverable) interrupt @inti: if the relevant PSW mask
 * is off, raise the matching pending cpuflag; otherwise intercept
 * loads of the control register whose subclass mask gates delivery.
 */
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		/* external interrupts are gated by CR0 subclass masks */
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		/* mchk masked in the PSW: watch for LPSW instead of CR14 */
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		/* I/O interrupts are gated by the CR6 isc mask */
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}
175
/*
 * Deliver one interrupt to the guest by writing the architected
 * information (interruption code/parameters and old PSW) into the
 * guest lowcore and loading the matching new PSW.  The caller must
 * have checked deliverability; a failing lowcore access is fatal for
 * the vcpu thread (do_exit below).
 */
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        /* instruction length (ILC) in bytes, indexed by ipa bits 14-15 */
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->emerg.code, 0);
                /* 0x1201 = emergency-signal external interruption code */
                rc  = put_guest(vcpu, 0x1201, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, inti->emerg.code,
                                (u16 __user *)__LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                break;
        case KVM_S390_INT_EXTERNAL_CALL:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
                vcpu->stat.deliver_external_call++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->extcall.code, 0);
                /* 0x1202 = external-call external interruption code */
                rc  = put_guest(vcpu, 0x1202, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, inti->extcall.code,
                                (u16 __user *)__LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                break;
        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->ext.ext_params, 0);
                /* 0x2401 = service-signal external interruption code */
                rc  = put_guest(vcpu, 0x2401, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                rc |= put_guest(vcpu, inti->ext.ext_params,
                                (u32 __user *)__LC_EXT_PARAMS);
                break;
        case KVM_S390_INT_PFAULT_INIT:
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
                                                 inti->ext.ext_params2);
                /* ext code 0x2603 / cpu addr 0x0600: pfault init */
                rc  = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, 0x0600, (u16 __user *) __LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                rc |= put_guest(vcpu, inti->ext.ext_params2,
                                (u64 __user *) __LC_EXT_PARAMS2);
                break;
        case KVM_S390_INT_PFAULT_DONE:
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
                                                 inti->ext.ext_params2);
                /* ext code 0x2603 / cpu addr 0x0680: pfault completion */
                rc  = put_guest(vcpu, 0x2603, (u16 __user *) __LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, 0x0680, (u16 __user *) __LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                rc |= put_guest(vcpu, inti->ext.ext_params2,
                                (u64 __user *) __LC_EXT_PARAMS2);
                break;
        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->ext.ext_params,
                                                 inti->ext.ext_params2);
                /* ext code 0x2603 / cpu addr 0x0d00: virtio notification */
                rc  = put_guest(vcpu, 0x2603, (u16 __user *)__LC_EXT_INT_CODE);
                rc |= put_guest(vcpu, 0x0d00, (u16 __user *)__LC_EXT_CPU_ADDR);
                rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_EXT_NEW_PSW, sizeof(psw_t));
                rc |= put_guest(vcpu, inti->ext.ext_params,
                                (u32 __user *)__LC_EXT_PARAMS);
                rc |= put_guest(vcpu, inti->ext.ext_params2,
                                (u64 __user *)__LC_EXT_PARAMS2);
                break;
        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 0, 0);
                /* stop is not injected; only the intercept flag is raised */
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->prefix.address, 0);
                kvm_s390_set_prefix(vcpu, inti->prefix.address);
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 0, 0);
                rc  = copy_to_guest(vcpu,
                                    offsetof(struct _lowcore, restart_old_psw),
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      offsetof(struct _lowcore, restart_psw),
                                      sizeof(psw_t));
                /* restart implicitly resumes a stopped cpu */
                atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
                break;
        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->pgm.code, 0);
                rc  = put_guest(vcpu, inti->pgm.code, (u16 __user *)__LC_PGM_INT_CODE);
                rc |= put_guest(vcpu, table[vcpu->arch.sie_block->ipa >> 14],
                                (u16 __user *)__LC_PGM_ILC);
                rc |= copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_PGM_NEW_PSW, sizeof(psw_t));
                break;

        case KVM_S390_MCHK:
                VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                           inti->mchk.mcic);
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 inti->mchk.cr14,
                                                 inti->mchk.mcic);
                /* machine checks require the register state to be stored */
                rc  = kvm_s390_vcpu_store_status(vcpu,
                                                 KVM_S390_STORE_STATUS_PREFIXED);
                rc |= put_guest(vcpu, inti->mchk.mcic, (u64 __user *) __LC_MCCK_CODE);
                rc |= copy_to_guest(vcpu, __LC_MCK_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_MCK_NEW_PSW, sizeof(psw_t));
                break;

        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        {
                /* trace parameters only: id/nr and parm/word packed together */
                __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
                        inti->io.subchannel_nr;
                __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
                        inti->io.io_int_word;
                VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
                vcpu->stat.deliver_io_int++;
                trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                                 param0, param1);
                rc  = put_guest(vcpu, inti->io.subchannel_id,
                                (u16 __user *) __LC_SUBCHANNEL_ID);
                rc |= put_guest(vcpu, inti->io.subchannel_nr,
                                (u16 __user *) __LC_SUBCHANNEL_NR);
                rc |= put_guest(vcpu, inti->io.io_int_parm,
                                (u32 __user *) __LC_IO_INT_PARM);
                rc |= put_guest(vcpu, inti->io.io_int_word,
                                (u32 __user *) __LC_IO_INT_WORD);
                rc |= copy_to_guest(vcpu, __LC_IO_OLD_PSW,
                                    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                                      __LC_IO_NEW_PSW, sizeof(psw_t));
                break;
        }
        default:
                BUG();
        }
        if (rc) {
                printk("kvm: The guest lowcore is not mapped during interrupt "
                       "delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}
359
/*
 * Deliver a clock comparator external interrupt (code 0x1004) if
 * external interrupts are enabled in the PSW and the ckc subclass
 * mask (CR0 bit 0x800) is set.  Returns 1 if delivered, 0 if the
 * interrupt is currently masked.
 */
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	/* 0x1004 = clock-comparator external interruption code */
	rc  = put_guest(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
	rc |= copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			      __LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc) {
		printk("kvm: The guest lowcore is not mapped during interrupt "
			"delivery, killing userspace\n");
		do_exit(SIGKILL);
	}
	return 1;
}
380
/*
 * Check whether the vcpu could take an interrupt right now: a
 * deliverable local interrupt, a deliverable floating interrupt, or
 * an already-elapsed clock comparator with ckc interrupts enabled.
 * Returns 1 if so, 0 otherwise.
 */
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *inti;
	int rc = 0;

	/* first scan the per-vcpu (local) interrupt list */
	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	/* then the vm-wide (floating) interrupt list */
	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	/* finally the clock comparator, gated by PSW ext and CR0 0x800 */
	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
			(vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}
417
/*
 * Generic KVM callback; on s390 clock comparator expiry is signalled
 * through the interrupt lists (see kvm_s390_idle_wakeup/tasklet), so
 * there is never a separately pending timer to report here.
 */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}
422
/*
 * Handle a guest enabled-wait state: block the vcpu thread until an
 * interrupt becomes pending, the clock comparator elapses, or a
 * signal arrives.
 *
 * Returns 0 on wakeup, or -EOPNOTSUPP for a disabled wait (all
 * interrupt classes masked), which must be handled by userspace.
 */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	/* a wait with every interrupt class masked can never end */
	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

	/* no ckc timer if ext ints or the ckc subclass are masked */
	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	/* clock comparator already elapsed: do not sleep at all */
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* arm a one-shot wakeup for the ckc expiry */
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	/* never sleep while holding the srcu read lock */
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	/* lock order: floating interrupt lock before local interrupt lock */
	spin_lock(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock(&vcpu->arch.local_int.float_int->lock);
		schedule();
		spin_lock(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* cancel is a no-op if the timer already fired or was never armed */
	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
485
486 void kvm_s390_tasklet(unsigned long parm)
487 {
488         struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
489
490         spin_lock(&vcpu->arch.local_int.lock);
491         vcpu->arch.local_int.timer_due = 1;
492         if (waitqueue_active(&vcpu->wq))
493                 wake_up_interruptible(&vcpu->wq);
494         spin_unlock(&vcpu->arch.local_int.lock);
495 }
496
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	vcpu->preempted = true;
	/* kvm_s390_tasklet() sets timer_due and wakes the vcpu thread */
	tasklet_schedule(&vcpu->arch.tasklet);

	/* one-shot: re-armed by kvm_s390_handle_wait() when needed */
	return HRTIMER_NORESTART;
}
511
/*
 * Deliver every currently deliverable interrupt to @vcpu: first the
 * local list, then an elapsed clock comparator, then the floating
 * list.  Interrupts that are not deliverable set up intercept
 * indicators so the guest exits as soon as it enables that class.
 * Note: the lock is dropped per iteration because delivery must
 * happen outside the spinlock.
 */
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					/* unlink now, deliver after unlock */
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					/* keep the floating irq count in sync */
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
569
/*
 * Like kvm_s390_deliver_pending_interrupts(), but only delivers
 * pending KVM_S390_MCHK interrupts from the local and floating
 * lists; all other pending interrupts merely (re)arm their
 * intercept indicators.
 */
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					/* unlink now, deliver after unlock */
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					/* keep the floating irq count in sync */
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
625
/*
 * Queue a program interrupt with interruption @code on the vcpu's
 * local interrupt list (kernel-internal injection path).
 * Returns 0 on success or -ENOMEM.
 */
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	/* presumably only called on behalf of the running vcpu itself,
	 * which therefore cannot be sleeping in its waitqueue */
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
647
/*
 * Dequeue and return the first pending floating I/O interrupt that
 * matches either an isc bit enabled in @cr6 or the subchannel id/nr
 * packed in @schid.  Exactly one of @cr6 and @schid must be non-zero,
 * otherwise NULL is returned.  On success the interrupt is removed
 * from the floating list and ownership passes to the caller (who
 * must free it); NULL means no match.
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		/* cr6 mode: match any enabled interruption subclass */
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		/* schid mode: match subchannel id (bits 16-31) and nr */
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
687
/*
 * Queue a floating interrupt on the vm-wide list and kick a vcpu to
 * deliver it.  I/O interrupts are kept sorted by interruption
 * subclass; all other types are appended.  An idle vcpu is preferred
 * as target, otherwise one is chosen round-robin.  Returns 0 on
 * success or -EINVAL if KVM_S390_MAX_FLOAT_IRQS interrupts are
 * already pending.  On success ownership of @inti passes to the list.
 */
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	/* cap the number of pending floating interrupts */
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	/* prefer an idle vcpu; otherwise pick one round-robin */
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
	spin_unlock_bh(&li->lock);
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}
743
744 int kvm_s390_inject_vm(struct kvm *kvm,
745                        struct kvm_s390_interrupt *s390int)
746 {
747         struct kvm_s390_interrupt_info *inti;
748
749         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
750         if (!inti)
751                 return -ENOMEM;
752
753         inti->type = s390int->type;
754         switch (inti->type) {
755         case KVM_S390_INT_VIRTIO:
756                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
757                          s390int->parm, s390int->parm64);
758                 inti->ext.ext_params = s390int->parm;
759                 inti->ext.ext_params2 = s390int->parm64;
760                 break;
761         case KVM_S390_INT_SERVICE:
762                 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
763                 inti->ext.ext_params = s390int->parm;
764                 break;
765         case KVM_S390_INT_PFAULT_DONE:
766                 inti->type = s390int->type;
767                 inti->ext.ext_params2 = s390int->parm64;
768                 break;
769         case KVM_S390_MCHK:
770                 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
771                          s390int->parm64);
772                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
773                 inti->mchk.mcic = s390int->parm64;
774                 break;
775         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
776                 if (inti->type & IOINT_AI_MASK)
777                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
778                 else
779                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
780                                  s390int->type & IOINT_CSSID_MASK,
781                                  s390int->type & IOINT_SSID_MASK,
782                                  s390int->type & IOINT_SCHID_MASK);
783                 inti->io.subchannel_id = s390int->parm >> 16;
784                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
785                 inti->io.io_int_parm = s390int->parm64 >> 32;
786                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
787                 break;
788         default:
789                 kfree(inti);
790                 return -EINVAL;
791         }
792         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
793                                  2);
794
795         return __inject_vm(kvm, inti);
796 }
797
/*
 * Inject a cpu-local interrupt described by a userspace
 * kvm_s390_interrupt into @vcpu's local interrupt list.
 *
 * Returns 0 on success, -ENOMEM if the interrupt info cannot be
 * allocated, or -EINVAL for out-of-range parameters and for types that
 * are floating-only (virtio, service, I/O) or unknown.
 */
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		/* Program interruption codes are 16 bit wide only. */
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		inti->prefix.address = s390int->parm;
		inti->type = s390int->type;
		VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
		/* No payload beyond the type itself. */
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		/* Source cpu address is 16 bit wide only. */
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
			   s390int->parm);
		inti->type = s390int->type;
		inti->extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		/* Emergency signal cpu address is 16 bit wide only. */
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
		inti->type = s390int->type;
		inti->emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
			   s390int->parm64);
		inti->type = s390int->type;
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_PFAULT_INIT:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		/* These are floating interrupts - reject per-vcpu injection. */
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
				   s390int->parm64, 2);

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	/* Program checks are delivered first, everything else in order. */
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	/* Kick the vcpu so it notices the pending interrupt. */
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
	vcpu->preempted = true;
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}
887
888 static void clear_floating_interrupts(struct kvm *kvm)
889 {
890         struct kvm_s390_float_interrupt *fi;
891         struct kvm_s390_interrupt_info  *n, *inti = NULL;
892
893         mutex_lock(&kvm->lock);
894         fi = &kvm->arch.float_int;
895         spin_lock(&fi->lock);
896         list_for_each_entry_safe(inti, n, &fi->list, list) {
897                 list_del(&inti->list);
898                 kfree(inti);
899         }
900         fi->irq_count = 0;
901         atomic_set(&fi->active, 0);
902         spin_unlock(&fi->lock);
903         mutex_unlock(&kvm->lock);
904 }
905
906 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
907                                    u8 *addr)
908 {
909         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
910         struct kvm_s390_irq irq = {0};
911
912         irq.type = inti->type;
913         switch (inti->type) {
914         case KVM_S390_INT_PFAULT_INIT:
915         case KVM_S390_INT_PFAULT_DONE:
916         case KVM_S390_INT_VIRTIO:
917         case KVM_S390_INT_SERVICE:
918                 irq.u.ext = inti->ext;
919                 break;
920         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
921                 irq.u.io = inti->io;
922                 break;
923         case KVM_S390_MCHK:
924                 irq.u.mchk = inti->mchk;
925                 break;
926         default:
927                 return -EINVAL;
928         }
929
930         if (copy_to_user(uptr, &irq, sizeof(irq)))
931                 return -EFAULT;
932
933         return 0;
934 }
935
936 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
937 {
938         struct kvm_s390_interrupt_info *inti;
939         struct kvm_s390_float_interrupt *fi;
940         int ret = 0;
941         int n = 0;
942
943         mutex_lock(&kvm->lock);
944         fi = &kvm->arch.float_int;
945         spin_lock(&fi->lock);
946
947         list_for_each_entry(inti, &fi->list, list) {
948                 if (len < sizeof(struct kvm_s390_irq)) {
949                         /* signal userspace to try again */
950                         ret = -ENOMEM;
951                         break;
952                 }
953                 ret = copy_irq_to_user(inti, buf);
954                 if (ret)
955                         break;
956                 buf += sizeof(struct kvm_s390_irq);
957                 len -= sizeof(struct kvm_s390_irq);
958                 n++;
959         }
960
961         spin_unlock(&fi->lock);
962         mutex_unlock(&kvm->lock);
963
964         return ret < 0 ? ret : n;
965 }
966
967 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
968 {
969         int r;
970
971         switch (attr->group) {
972         case KVM_DEV_FLIC_GET_ALL_IRQS:
973                 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
974                                           attr->attr);
975                 break;
976         default:
977                 r = -EINVAL;
978         }
979
980         return r;
981 }
982
983 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
984                                      u64 addr)
985 {
986         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
987         void *target = NULL;
988         void __user *source;
989         u64 size;
990
991         if (get_user(inti->type, (u64 __user *)addr))
992                 return -EFAULT;
993
994         switch (inti->type) {
995         case KVM_S390_INT_PFAULT_INIT:
996         case KVM_S390_INT_PFAULT_DONE:
997         case KVM_S390_INT_VIRTIO:
998         case KVM_S390_INT_SERVICE:
999                 target = (void *) &inti->ext;
1000                 source = &uptr->u.ext;
1001                 size = sizeof(inti->ext);
1002                 break;
1003         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1004                 target = (void *) &inti->io;
1005                 source = &uptr->u.io;
1006                 size = sizeof(inti->io);
1007                 break;
1008         case KVM_S390_MCHK:
1009                 target = (void *) &inti->mchk;
1010                 source = &uptr->u.mchk;
1011                 size = sizeof(inti->mchk);
1012                 break;
1013         default:
1014                 return -EINVAL;
1015         }
1016
1017         if (copy_from_user(target, source, size))
1018                 return -EFAULT;
1019
1020         return 0;
1021 }
1022
1023 static int enqueue_floating_irq(struct kvm_device *dev,
1024                                 struct kvm_device_attr *attr)
1025 {
1026         struct kvm_s390_interrupt_info *inti = NULL;
1027         int r = 0;
1028         int len = attr->attr;
1029
1030         if (len % sizeof(struct kvm_s390_irq) != 0)
1031                 return -EINVAL;
1032         else if (len > KVM_S390_FLIC_MAX_BUFFER)
1033                 return -EINVAL;
1034
1035         while (len >= sizeof(struct kvm_s390_irq)) {
1036                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1037                 if (!inti)
1038                         return -ENOMEM;
1039
1040                 r = copy_irq_from_user(inti, attr->addr);
1041                 if (r) {
1042                         kfree(inti);
1043                         return r;
1044                 }
1045                 r = __inject_vm(dev->kvm, inti);
1046                 if (r) {
1047                         kfree(inti);
1048                         return r;
1049                 }
1050                 len -= sizeof(struct kvm_s390_irq);
1051                 attr->addr += sizeof(struct kvm_s390_irq);
1052         }
1053
1054         return r;
1055 }
1056
1057 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1058 {
1059         if (id >= MAX_S390_IO_ADAPTERS)
1060                 return NULL;
1061         return kvm->arch.adapters[id];
1062 }
1063
1064 static int register_io_adapter(struct kvm_device *dev,
1065                                struct kvm_device_attr *attr)
1066 {
1067         struct s390_io_adapter *adapter;
1068         struct kvm_s390_io_adapter adapter_info;
1069
1070         if (copy_from_user(&adapter_info,
1071                            (void __user *)attr->addr, sizeof(adapter_info)))
1072                 return -EFAULT;
1073
1074         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1075             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1076                 return -EINVAL;
1077
1078         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1079         if (!adapter)
1080                 return -ENOMEM;
1081
1082         INIT_LIST_HEAD(&adapter->maps);
1083         init_rwsem(&adapter->maps_lock);
1084         atomic_set(&adapter->nr_maps, 0);
1085         adapter->id = adapter_info.id;
1086         adapter->isc = adapter_info.isc;
1087         adapter->maskable = adapter_info.maskable;
1088         adapter->masked = false;
1089         adapter->swap = adapter_info.swap;
1090         dev->kvm->arch.adapters[adapter->id] = adapter;
1091
1092         return 0;
1093 }
1094
1095 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1096 {
1097         int ret;
1098         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1099
1100         if (!adapter || !adapter->maskable)
1101                 return -EINVAL;
1102         ret = adapter->masked;
1103         adapter->masked = masked;
1104         return ret;
1105 }
1106
1107 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1108 {
1109         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1110         struct s390_map_info *map;
1111         int ret;
1112
1113         if (!adapter || !addr)
1114                 return -EINVAL;
1115
1116         map = kzalloc(sizeof(*map), GFP_KERNEL);
1117         if (!map) {
1118                 ret = -ENOMEM;
1119                 goto out;
1120         }
1121         INIT_LIST_HEAD(&map->list);
1122         map->guest_addr = addr;
1123         map->addr = gmap_translate(addr, kvm->arch.gmap);
1124         if (map->addr == -EFAULT) {
1125                 ret = -EFAULT;
1126                 goto out;
1127         }
1128         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1129         if (ret < 0)
1130                 goto out;
1131         BUG_ON(ret != 1);
1132         down_write(&adapter->maps_lock);
1133         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1134                 list_add_tail(&map->list, &adapter->maps);
1135                 ret = 0;
1136         } else {
1137                 put_page(map->page);
1138                 ret = -EINVAL;
1139         }
1140         up_write(&adapter->maps_lock);
1141 out:
1142         if (ret)
1143                 kfree(map);
1144         return ret;
1145 }
1146
1147 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1148 {
1149         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1150         struct s390_map_info *map, *tmp;
1151         int found = 0;
1152
1153         if (!adapter || !addr)
1154                 return -EINVAL;
1155
1156         down_write(&adapter->maps_lock);
1157         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1158                 if (map->guest_addr == addr) {
1159                         found = 1;
1160                         atomic_dec(&adapter->nr_maps);
1161                         list_del(&map->list);
1162                         put_page(map->page);
1163                         kfree(map);
1164                         break;
1165                 }
1166         }
1167         up_write(&adapter->maps_lock);
1168
1169         return found ? 0 : -EINVAL;
1170 }
1171
1172 void kvm_s390_destroy_adapters(struct kvm *kvm)
1173 {
1174         int i;
1175         struct s390_map_info *map, *tmp;
1176
1177         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1178                 if (!kvm->arch.adapters[i])
1179                         continue;
1180                 list_for_each_entry_safe(map, tmp,
1181                                          &kvm->arch.adapters[i]->maps, list) {
1182                         list_del(&map->list);
1183                         put_page(map->page);
1184                         kfree(map);
1185                 }
1186                 kfree(kvm->arch.adapters[i]);
1187         }
1188 }
1189
1190 static int modify_io_adapter(struct kvm_device *dev,
1191                              struct kvm_device_attr *attr)
1192 {
1193         struct kvm_s390_io_adapter_req req;
1194         struct s390_io_adapter *adapter;
1195         int ret;
1196
1197         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1198                 return -EFAULT;
1199
1200         adapter = get_io_adapter(dev->kvm, req.id);
1201         if (!adapter)
1202                 return -EINVAL;
1203         switch (req.type) {
1204         case KVM_S390_IO_ADAPTER_MASK:
1205                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1206                 if (ret > 0)
1207                         ret = 0;
1208                 break;
1209         case KVM_S390_IO_ADAPTER_MAP:
1210                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1211                 break;
1212         case KVM_S390_IO_ADAPTER_UNMAP:
1213                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1214                 break;
1215         default:
1216                 ret = -EINVAL;
1217         }
1218
1219         return ret;
1220 }
1221
1222 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1223 {
1224         int r = 0;
1225         unsigned int i;
1226         struct kvm_vcpu *vcpu;
1227
1228         switch (attr->group) {
1229         case KVM_DEV_FLIC_ENQUEUE:
1230                 r = enqueue_floating_irq(dev, attr);
1231                 break;
1232         case KVM_DEV_FLIC_CLEAR_IRQS:
1233                 r = 0;
1234                 clear_floating_interrupts(dev->kvm);
1235                 break;
1236         case KVM_DEV_FLIC_APF_ENABLE:
1237                 dev->kvm->arch.gmap->pfault_enabled = 1;
1238                 break;
1239         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1240                 dev->kvm->arch.gmap->pfault_enabled = 0;
1241                 /*
1242                  * Make sure no async faults are in transition when
1243                  * clearing the queues. So we don't need to worry
1244                  * about late coming workers.
1245                  */
1246                 synchronize_srcu(&dev->kvm->srcu);
1247                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1248                         kvm_clear_async_pf_completion_queue(vcpu);
1249                 break;
1250         case KVM_DEV_FLIC_ADAPTER_REGISTER:
1251                 r = register_io_adapter(dev, attr);
1252                 break;
1253         case KVM_DEV_FLIC_ADAPTER_MODIFY:
1254                 r = modify_io_adapter(dev, attr);
1255                 break;
1256         default:
1257                 r = -EINVAL;
1258         }
1259
1260         return r;
1261 }
1262
1263 static int flic_create(struct kvm_device *dev, u32 type)
1264 {
1265         if (!dev)
1266                 return -EINVAL;
1267         if (dev->kvm->arch.flic)
1268                 return -EINVAL;
1269         dev->kvm->arch.flic = dev;
1270         return 0;
1271 }
1272
/* Destroy the flic device: detach it from the VM, then free it. */
static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}
1278
/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,	/* dump pending floating irqs */
	.set_attr = flic_set_attr,	/* inject/clear irqs, adapters, apf */
	.create = flic_create,
	.destroy = flic_destroy,
};