1 /*
2  * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License, version 2, as
6  * published by the Free Software Foundation.
7  */
8
9 #define pr_fmt(fmt) "xive-kvm: " fmt
10
11 #include <linux/kernel.h>
12 #include <linux/kvm_host.h>
13 #include <linux/err.h>
14 #include <linux/gfp.h>
15 #include <linux/spinlock.h>
16 #include <linux/delay.h>
17 #include <linux/percpu.h>
18 #include <linux/cpumask.h>
19 #include <linux/uaccess.h>
20 #include <asm/kvm_book3s.h>
21 #include <asm/kvm_ppc.h>
22 #include <asm/hvcall.h>
23 #include <asm/xics.h>
24 #include <asm/xive.h>
25 #include <asm/xive-regs.h>
26 #include <asm/debug.h>
27 #include <asm/debugfs.h>
28 #include <asm/time.h>
29 #include <asm/opal.h>
30
31 #include <linux/debugfs.h>
32 #include <linux/seq_file.h>
33
34 #include "book3s_xive.h"
35
36
37 /*
38  * Virtual mode variants of the hcalls for use on radix/radix
39  * with AIL. They require the VCPU's VP to be "pushed".
40  *
41  * We still instantiate them here because we use some of the
42  * generated utility functions as well in this file.
43  */
44 #define XIVE_RUNTIME_CHECKS
45 #define X_PFX xive_vm_
46 #define X_STATIC static
47 #define X_STAT_PFX stat_vm_
48 #define __x_tima                xive_tima
49 #define __x_eoi_page(xd)        ((void __iomem *)((xd)->eoi_mmio))
50 #define __x_trig_page(xd)       ((void __iomem *)((xd)->trig_mmio))
51 #define __x_writeb      __raw_writeb
52 #define __x_readw       __raw_readw
53 #define __x_readq       __raw_readq
54 #define __x_writeq      __raw_writeq
55
56 #include "book3s_xive_template.c"
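/*
 * Illustrative note (a sketch, not an exhaustive list): the template
 * names its entry points GLUE(X_PFX, name), so with the macros set
 * above it emits virtual-mode helpers such as xive_vm_h_xirr(),
 * which use the __x_* accessors defined here.
 */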
57
58 /*
59  * We leave a gap of a couple of interrupts in the queue to
60  * account for the IPI and an additional safety margin.
61  */
62 #define XIVE_Q_GAP      2
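/*
 * Worked example of the gap accounting (illustrative only): for a
 * queue of 256 entries, q->msk is 255, so xive_try_pick_queue()
 * below accepts at most (q->msk + 1) - XIVE_Q_GAP = 254 routed
 * interrupts, keeping two slots free for the IPI and as headroom.
 */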
63
64 /*
65  * This is a simple trigger for a generic XIVE IRQ. This must
66  * only be called for interrupts that support a trigger page
67  */
68 static bool xive_irq_trigger(struct xive_irq_data *xd)
69 {
70         /* This should be only for MSIs */
71         if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
72                 return false;
73
74         /* Those interrupts should always have a trigger page */
75         if (WARN_ON(!xd->trig_mmio))
76                 return false;
77
78         out_be64(xd->trig_mmio, 0);
79
80         return true;
81 }
82
83 static irqreturn_t xive_esc_irq(int irq, void *data)
84 {
85         struct kvm_vcpu *vcpu = data;
86
87         vcpu->arch.irq_pending = 1;
88         smp_mb();
89         if (vcpu->arch.ceded)
90                 kvmppc_fast_vcpu_kick(vcpu);
91
92         /* Since we have the no-EOI flag, the interrupt is effectively
93          * disabled now. Clearing xive_esc_on means we won't bother
94          * doing so on the next entry.
95          *
96          * This also allows the entry code to know that if a PQ combination
97          * of 10 is observed while xive_esc_on is true, it means the queue
98          * contains an unprocessed escalation interrupt. We don't make use of
99          * that knowledge today but might (see comment in book3s_hv_rmhandlers.S).
100          */
101         vcpu->arch.xive_esc_on = false;
102
103         return IRQ_HANDLED;
104 }
105
106 static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
107 {
108         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
109         struct xive_q *q = &xc->queues[prio];
110         char *name = NULL;
111         int rc;
112
113         /* Already there ? */
114         if (xc->esc_virq[prio])
115                 return 0;
116
117         /* Hook up the escalation interrupt */
118         xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
119         if (!xc->esc_virq[prio]) {
120                 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
121                        prio, xc->server_num);
122                 return -EIO;
123         }
124
125         if (xc->xive->single_escalation)
126                 name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
127                                  vcpu->kvm->arch.lpid, xc->server_num);
128         else
129                 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
130                                  vcpu->kvm->arch.lpid, xc->server_num, prio);
131         if (!name) {
132                 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
133                        prio, xc->server_num);
134                 rc = -ENOMEM;
135                 goto error;
136         }
137
138         pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
139
140         rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
141                          IRQF_NO_THREAD, name, vcpu);
142         if (rc) {
143                 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
144                        prio, xc->server_num);
145                 goto error;
146         }
147         xc->esc_virq_names[prio] = name;
148
149         /* In single escalation mode, we grab the ESB MMIO of the
150          * interrupt and mask it. Also populate the VCPU v/raddr
151          * of the ESB page for use by asm entry/exit code. Finally
152          * set the XIVE_IRQ_NO_EOI flag which will prevent the
153          * core code from performing an EOI on the escalation
154          * interrupt, thus leaving it effectively masked after
155          * it fires once.
156          */
157         if (xc->xive->single_escalation) {
158                 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
159                 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
160
161                 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
162                 vcpu->arch.xive_esc_raddr = xd->eoi_page;
163                 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
164                 xd->flags |= XIVE_IRQ_NO_EOI;
165         }
166
167         return 0;
168 error:
169         irq_dispose_mapping(xc->esc_virq[prio]);
170         xc->esc_virq[prio] = 0;
171         kfree(name);
172         return rc;
173 }
174
175 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
176 {
177         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
178         struct kvmppc_xive *xive = xc->xive;
179         struct xive_q *q =  &xc->queues[prio];
180         void *qpage;
181         int rc;
182
183         if (WARN_ON(q->qpage))
184                 return 0;
185
186         /* Allocate the queue and retrieve info on the current node for now */
187         qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
188         if (!qpage) {
189                 pr_err("Failed to allocate queue %d for VCPU %d\n",
190                        prio, xc->server_num);
191                 return -ENOMEM;
192         }
193         memset(qpage, 0, 1 << xive->q_order);
194
195         /*
196          * Reconfigure the queue. This will set q->qpage only once the
197          * queue is fully configured. This is a requirement for prio 0
198          * as we will stop doing EOIs for every IPI as soon as we observe
199          * qpage being non-NULL, and instead will only EOI when we receive
200          * corresponding queue 0 entries
201          */
202         rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
203                                          xive->q_order, true);
204         if (rc)
205                 pr_err("Failed to configure queue %d for VCPU %d\n",
206                        prio, xc->server_num);
207         return rc;
208 }
209
210 /* Called with kvm_lock held */
211 static int xive_check_provisioning(struct kvm *kvm, u8 prio)
212 {
213         struct kvmppc_xive *xive = kvm->arch.xive;
214         struct kvm_vcpu *vcpu;
215         int i, rc;
216
217         lockdep_assert_held(&kvm->lock);
218
219         /* Already provisioned ? */
220         if (xive->qmap & (1 << prio))
221                 return 0;
222
223         pr_devel("Provisioning prio %d...\n", prio);
224
225         /* Provision each VCPU and enable escalations if needed */
226         kvm_for_each_vcpu(i, vcpu, kvm) {
227                 if (!vcpu->arch.xive_vcpu)
228                         continue;
229                 rc = xive_provision_queue(vcpu, prio);
230                 if (rc == 0 && !xive->single_escalation)
231                         xive_attach_escalation(vcpu, prio);
232                 if (rc)
233                         return rc;
234         }
235
236         /* Order previous stores and mark it as provisioned */
237         mb();
238         xive->qmap |= (1 << prio);
239         return 0;
240 }
241
242 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
243 {
244         struct kvm_vcpu *vcpu;
245         struct kvmppc_xive_vcpu *xc;
246         struct xive_q *q;
247
248         /* Locate target server */
249         vcpu = kvmppc_xive_find_server(kvm, server);
250         if (!vcpu) {
251                 pr_warn("%s: Can't find server %d\n", __func__, server);
252                 return;
253         }
254         xc = vcpu->arch.xive_vcpu;
255         if (WARN_ON(!xc))
256                 return;
257
258         q = &xc->queues[prio];
259         atomic_inc(&q->pending_count);
260 }
261
262 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
263 {
264         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
265         struct xive_q *q;
266         u32 max;
267
268         if (WARN_ON(!xc))
269                 return -ENXIO;
270         if (!xc->valid)
271                 return -ENXIO;
272
273         q = &xc->queues[prio];
274         if (WARN_ON(!q->qpage))
275                 return -ENXIO;
276
277         /* Calculate max number of interrupts in that queue. */
278         max = (q->msk + 1) - XIVE_Q_GAP;
279         return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
280 }
281
282 static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
283 {
284         struct kvm_vcpu *vcpu;
285         int i, rc;
286
287         /* Locate target server */
288         vcpu = kvmppc_xive_find_server(kvm, *server);
289         if (!vcpu) {
290                 pr_devel("Can't find server %d\n", *server);
291                 return -EINVAL;
292         }
293
294         pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
295
296         /* Try pick it */
297         rc = xive_try_pick_queue(vcpu, prio);
298         if (rc == 0)
299                 return rc;
300
301         pr_devel(" .. failed, looking up candidate...\n");
302
303         /* Failed, pick another VCPU */
304         kvm_for_each_vcpu(i, vcpu, kvm) {
305                 if (!vcpu->arch.xive_vcpu)
306                         continue;
307                 rc = xive_try_pick_queue(vcpu, prio);
308                 if (rc == 0) {
309                         *server = vcpu->arch.xive_vcpu->server_num;
310                         pr_devel("  found on 0x%x/%d\n", *server, prio);
311                         return rc;
312                 }
313         }
314         pr_devel("  no available target !\n");
315
316         /* No available target ! */
317         return -EBUSY;
318 }
319
320 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
321                              struct kvmppc_xive_src_block *sb,
322                              struct kvmppc_xive_irq_state *state)
323 {
324         struct xive_irq_data *xd;
325         u32 hw_num;
326         u8 old_prio;
327         u64 val;
328
329         /*
330          * Take the lock, set masked, try again if racing
331          * with H_EOI
332          */
333         for (;;) {
334                 arch_spin_lock(&sb->lock);
335                 old_prio = state->guest_priority;
336                 state->guest_priority = MASKED;
337                 mb();
338                 if (!state->in_eoi)
339                         break;
340                 state->guest_priority = old_prio;
341                 arch_spin_unlock(&sb->lock);
342         }
343
344         /* No change ? Bail */
345         if (old_prio == MASKED)
346                 return old_prio;
347
348         /* Get the right irq */
349         kvmppc_xive_select_irq(state, &hw_num, &xd);
350
351         /*
352          * If the interrupt is marked as needing masking via
353          * firmware, we do it here. Firmware masking however
354          * is "lossy", it won't return the old p and q bits
355          * and won't set the interrupt to a state where it will
356          * record queued ones. If this is an issue we should do
357          * lazy masking instead.
358          *
359          * For now, we work around this in unmask by forcing
360          * an interrupt whenever we unmask a non-LSI via FW
361          * (if ever).
362          */
363         if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
364                 xive_native_configure_irq(hw_num,
365                                           xive->vp_base + state->act_server,
366                                           MASKED, state->number);
367                 /* set old_p so we can track if an H_EOI was done */
368                 state->old_p = true;
369                 state->old_q = false;
370         } else {
371                 /* Set PQ to 10, return old P and old Q and remember them */
372                 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
373                 state->old_p = !!(val & 2);
374                 state->old_q = !!(val & 1);
375
376                 /*
377                  * Synchronize hardware to ensure the queues are updated
378                  * when masking
379                  */
380                 xive_native_sync_source(hw_num);
381         }
382
383         return old_prio;
384 }
385
386 static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
387                                  struct kvmppc_xive_irq_state *state)
388 {
389         /*
390          * Take the lock, try again if racing with H_EOI
391          */
392         for (;;) {
393                 arch_spin_lock(&sb->lock);
394                 if (!state->in_eoi)
395                         break;
396                 arch_spin_unlock(&sb->lock);
397         }
398 }
399
400 static void xive_finish_unmask(struct kvmppc_xive *xive,
401                                struct kvmppc_xive_src_block *sb,
402                                struct kvmppc_xive_irq_state *state,
403                                u8 prio)
404 {
405         struct xive_irq_data *xd;
406         u32 hw_num;
407
408         /* If we aren't changing a thing, move on */
409         if (state->guest_priority != MASKED)
410                 goto bail;
411
412         /* Get the right irq */
413         kvmppc_xive_select_irq(state, &hw_num, &xd);
414
415         /*
416          * See comment in xive_lock_and_mask() concerning masking
417          * via firmware.
418          */
419         if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
420                 xive_native_configure_irq(hw_num,
421                                           xive->vp_base + state->act_server,
422                                           state->act_priority, state->number);
423                 /* If an EOI is needed, do it here */
424                 if (!state->old_p)
425                         xive_vm_source_eoi(hw_num, xd);
426                 /* If this is not an LSI, force a trigger */
427                 if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
428                         xive_irq_trigger(xd);
429                 goto bail;
430         }
431
432         /* Old Q set, set PQ to 11 */
433         if (state->old_q)
434                 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
435
436         /*
437          * If not old P, then perform an "effective" EOI,
438          * on the source. This will handle the cases where
439          * FW EOI is needed.
440          */
441         if (!state->old_p)
442                 xive_vm_source_eoi(hw_num, xd);
443
444         /* Synchronize ordering and mark unmasked */
445         mb();
446 bail:
447         state->guest_priority = prio;
448 }
449
450 /*
451  * Target an interrupt to a given server/prio. This will fall back
452  * to another server if necessary and perform the HW targetting
453  * updates as needed
454  *
455  * NOTE: Must be called with the state lock held
456  */
457 static int xive_target_interrupt(struct kvm *kvm,
458                                  struct kvmppc_xive_irq_state *state,
459                                  u32 server, u8 prio)
460 {
461         struct kvmppc_xive *xive = kvm->arch.xive;
462         u32 hw_num;
463         int rc;
464
465         /*
466          * This will return a tentative server and actual
467          * priority. The count for that new target will have
468          * already been incremented.
469          */
470         rc = xive_select_target(kvm, &server, prio);
471
472         /*
473          * We failed to find a target ? Not much we can do,
474          * at least until we support the GIQ.
475          */
476         if (rc)
477                 return rc;
478
479         /*
480          * Increment the old queue pending count if there
481          * was one so that the old queue count gets adjusted later
482          * when observed to be empty.
483          */
484         if (state->act_priority != MASKED)
485                 xive_inc_q_pending(kvm,
486                                    state->act_server,
487                                    state->act_priority);
488         /*
489          * Update state and HW
490          */
491         state->act_priority = prio;
492         state->act_server = server;
493
494         /* Get the right irq */
495         kvmppc_xive_select_irq(state, &hw_num, NULL);
496
497         return xive_native_configure_irq(hw_num,
498                                          xive->vp_base + server,
499                                          prio, state->number);
500 }
501
502 /*
503  * Targetting rules: In order to avoid losing track of
504  * pending interrupts across mask and unmask, which would
505  * allow queue overflows, we implement the following rules:
506  *
507  *  - Unless it was never enabled (or we run out of capacity)
508  *    an interrupt is always targetted at a valid server/queue
509  *    pair even when "masked" by the guest. This pair tends to
510  *    be the last one used but it can be changed under some
511  *    circumstances. That allows us to separate targetting
512  *    from masking: we only handle accounting during (re)targetting,
513  *    which also allows us to let an interrupt drain into its target
514  *    queue after masking, avoiding complex schemes to remove
515  *    interrupts out of remote processor queues.
516  *
517  *  - When masking, we set PQ to 10 and save the previous value
518  *    of P and Q.
519  *
520  *  - When unmasking, if saved Q was set, we set PQ to 11
521  *    otherwise we leave PQ to the HW state which will be either
522  *    10 if nothing happened or 11 if the interrupt fired while
523  *    masked. Effectively we are OR'ing the previous Q into the
524  *    HW Q.
525  *
526  *    Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
527  *    which will unmask the interrupt and shoot a new one if Q was
528  *    set.
529  *
530  *    Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
531  *    effectively meaning an H_EOI from the guest is still expected
532  *    for that interrupt).
533  *
534  *  - If H_EOI occurs while masked, we clear the saved P.
535  *
536  *  - When changing target, we account on the new target and
537  *    increment a separate "pending" counter on the old one.
538  *    This pending counter will be used to decrement the old
539  *    target's count when its queue has been observed empty.
540  */
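/*
 * Worked example of the rules above (illustrative, assuming the usual
 * XIVE ESB semantics where a trigger received while P=1 only sets Q):
 * start from an unmasked source with PQ=00.
 *
 *   mask:     ESB load with SET_PQ_10 -> PQ=10, saved P=0 Q=0
 *   fires:    while masked, HW sets Q -> PQ=11, nothing is delivered
 *   unmask:   saved Q was clear so PQ is left as found (11 here);
 *             saved P is clear so we do an effective EOI, which
 *             re-triggers the interrupt since Q was set.
 */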
541
542 int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
543                          u32 priority)
544 {
545         struct kvmppc_xive *xive = kvm->arch.xive;
546         struct kvmppc_xive_src_block *sb;
547         struct kvmppc_xive_irq_state *state;
548         u8 new_act_prio;
549         int rc = 0;
550         u16 idx;
551
552         if (!xive)
553                 return -ENODEV;
554
555         pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
556                  irq, server, priority);
557
558         /* First, check provisioning of queues */
559         if (priority != MASKED)
560                 rc = xive_check_provisioning(xive->kvm,
561                               xive_prio_from_guest(priority));
562         if (rc) {
563                 pr_devel("  provisioning failure %d !\n", rc);
564                 return rc;
565         }
566
567         sb = kvmppc_xive_find_source(xive, irq, &idx);
568         if (!sb)
569                 return -EINVAL;
570         state = &sb->irq_state[idx];
571
572         /*
573          * We first handle masking/unmasking since the locking
574          * might need to be retried due to EOIs, we'll handle
575          * targetting changes later. These functions will return
576          * with the SB lock held.
577          *
578          * xive_lock_and_mask() will also set state->guest_priority
579          * but won't otherwise change other fields of the state.
580          *
581          * xive_lock_for_unmask will not actually unmask, this will
582          * be done later by xive_finish_unmask() once the targetting
583          * has been done, so we don't try to unmask an interrupt
584          * that hasn't yet been targetted.
585          */
586         if (priority == MASKED)
587                 xive_lock_and_mask(xive, sb, state);
588         else
589                 xive_lock_for_unmask(sb, state);
590
591
592         /*
593          * Then we handle targetting.
594          *
595          * First calculate a new "actual priority"
596          */
597         new_act_prio = state->act_priority;
598         if (priority != MASKED)
599                 new_act_prio = xive_prio_from_guest(priority);
600
601         pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
602                  new_act_prio, state->act_server, state->act_priority);
603
604         /*
605          * Then check if we actually need to change anything,
606          *
607          * The condition for re-targetting the interrupt is that
608          * we have a valid new priority (new_act_prio is not 0xff)
609          * and either the server or the priority changed.
610          *
611          * Note: If act_priority was ff and the new priority is
612          *       also ff, we don't do anything and leave the interrupt
613          *       untargetted. An attempt to do an int_on on an
614          *       untargetted interrupt will fail. If that is a problem
615          *       we could initialize interrupts with a valid default priority.
616          */
617
618         if (new_act_prio != MASKED &&
619             (state->act_server != server ||
620              state->act_priority != new_act_prio))
621                 rc = xive_target_interrupt(kvm, state, server, new_act_prio);
622
623         /*
624          * Perform the final unmasking of the interrupt source
625          * if necessary
626          */
627         if (priority != MASKED)
628                 xive_finish_unmask(xive, sb, state, priority);
629
630         /*
631          * Finally, update saved_priority to match. Only int_on/off
632          * set this field to a different value.
633          */
634         state->saved_priority = priority;
635
636         arch_spin_unlock(&sb->lock);
637         return rc;
638 }
639
640 int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
641                          u32 *priority)
642 {
643         struct kvmppc_xive *xive = kvm->arch.xive;
644         struct kvmppc_xive_src_block *sb;
645         struct kvmppc_xive_irq_state *state;
646         u16 idx;
647
648         if (!xive)
649                 return -ENODEV;
650
651         sb = kvmppc_xive_find_source(xive, irq, &idx);
652         if (!sb)
653                 return -EINVAL;
654         state = &sb->irq_state[idx];
655         arch_spin_lock(&sb->lock);
656         *server = state->act_server;
657         *priority = state->guest_priority;
658         arch_spin_unlock(&sb->lock);
659
660         return 0;
661 }
662
663 int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
664 {
665         struct kvmppc_xive *xive = kvm->arch.xive;
666         struct kvmppc_xive_src_block *sb;
667         struct kvmppc_xive_irq_state *state;
668         u16 idx;
669
670         if (!xive)
671                 return -ENODEV;
672
673         sb = kvmppc_xive_find_source(xive, irq, &idx);
674         if (!sb)
675                 return -EINVAL;
676         state = &sb->irq_state[idx];
677
678         pr_devel("int_on(irq=0x%x)\n", irq);
679
680         /*
681          * Check if interrupt was not targetted
682          * Check if the interrupt was not targetted
683         if (state->act_priority == MASKED) {
684                 pr_devel("int_on on untargetted interrupt\n");
685                 return -EINVAL;
686         }
687
688         /* If saved_priority is 0xff, do nothing */
689         if (state->saved_priority == MASKED)
690                 return 0;
691
692         /*
693          * Lock and unmask it.
694          */
695         xive_lock_for_unmask(sb, state);
696         xive_finish_unmask(xive, sb, state, state->saved_priority);
697         arch_spin_unlock(&sb->lock);
698
699         return 0;
700 }
701
702 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
703 {
704         struct kvmppc_xive *xive = kvm->arch.xive;
705         struct kvmppc_xive_src_block *sb;
706         struct kvmppc_xive_irq_state *state;
707         u16 idx;
708
709         if (!xive)
710                 return -ENODEV;
711
712         sb = kvmppc_xive_find_source(xive, irq, &idx);
713         if (!sb)
714                 return -EINVAL;
715         state = &sb->irq_state[idx];
716
717         pr_devel("int_off(irq=0x%x)\n", irq);
718
719         /*
720          * Lock and mask
721          */
722         state->saved_priority = xive_lock_and_mask(xive, sb, state);
723         arch_spin_unlock(&sb->lock);
724
725         return 0;
726 }
727
728 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
729 {
730         struct kvmppc_xive_src_block *sb;
731         struct kvmppc_xive_irq_state *state;
732         u16 idx;
733
734         sb = kvmppc_xive_find_source(xive, irq, &idx);
735         if (!sb)
736                 return false;
737         state = &sb->irq_state[idx];
738         if (!state->valid)
739                 return false;
740
741         /*
742          * Trigger the IPI. This assumes we never restore a pass-through
743          * interrupt, which should be safe enough.
744          */
745         xive_irq_trigger(&state->ipi_data);
746
747         return true;
748 }
749
750 u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
751 {
752         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
753
754         if (!xc)
755                 return 0;
756
757         /* Return the per-cpu state for state saving/migration */
758         return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
759                (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
760                (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
761 }
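/*
 * For reference, a sketch of the 64-bit ICP word assembled above, per
 * the KVM_REG_PPC_ICP_* shifts in the uapi headers:
 *
 *   bits 63..56  CPPR  current processor priority
 *   bits 55..32  XISR  pending interrupt source (0: none reported)
 *   bits 31..24  MFRR  most favoured request register
 *   bits 23..16  PPRI  pending priority, hardwired to 0xff here
 */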
762
763 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
764 {
765         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
766         struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
767         u8 cppr, mfrr;
768         u32 xisr;
769
770         if (!xc || !xive)
771                 return -ENOENT;
772
773         /* Grab individual state fields. We don't use pending_pri */
774         cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
775         xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
776                 KVM_REG_PPC_ICP_XISR_MASK;
777         mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
778
779         pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
780                  xc->server_num, cppr, mfrr, xisr);
781
782         /*
783          * We can't update the state of a "pushed" VCPU, but that
784          * shouldn't happen.
785          */
786         if (WARN_ON(vcpu->arch.xive_pushed))
787                 return -EIO;
788
789         /* Update VCPU HW saved state */
790         vcpu->arch.xive_saved_state.cppr = cppr;
791         xc->hw_cppr = xc->cppr = cppr;
792
793         /*
794          * Update MFRR state. If it's not 0xff, we mark the VCPU as
795          * having a pending MFRR change, which will re-evaluate the
796          * target. The VCPU will thus potentially get a spurious
797          * interrupt but that's not a big deal.
798          */
799         xc->mfrr = mfrr;
800         if (mfrr < cppr)
801                 xive_irq_trigger(&xc->vp_ipi_data);
802
803         /*
804          * Now saved XIRR is "interesting". It means there's something in
805          * the legacy "1 element" queue... for an IPI we simply ignore it,
806          * as the MFRR restore will handle that. For anything else we need
807          * to force a resend of the source.
808          * However the source may not have been setup yet. If that's the
809          * case, we keep that info and increment a counter in the xive to
810          * tell subsequent xive_set_source() to go look.
811          */
812         if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
813                 xc->delayed_irq = xisr;
814                 xive->delayed_irqs++;
815                 pr_devel("  xisr restore delayed\n");
816         }
817
818         return 0;
819 }
820
821 int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
822                            struct irq_desc *host_desc)
823 {
824         struct kvmppc_xive *xive = kvm->arch.xive;
825         struct kvmppc_xive_src_block *sb;
826         struct kvmppc_xive_irq_state *state;
827         struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
828         unsigned int host_irq = irq_desc_get_irq(host_desc);
829         unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
830         u16 idx;
831         u8 prio;
832         int rc;
833
834         if (!xive)
835                 return -ENODEV;
836
837         pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
838
839         sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
840         if (!sb)
841                 return -EINVAL;
842         state = &sb->irq_state[idx];
843
844         /*
845          * Mark the passed-through interrupt as going to a VCPU,
846          * this will prevent further EOIs and similar operations
847          * from the XIVE code. It will also mask the interrupt
848          * to either PQ=10 or 11 state, the latter if the interrupt
849          * is pending. This will allow us to unmask or retrigger it
850          * after routing it to the guest with a simple EOI.
851          *
852          * The "state" argument is a "token", all it needs is to be
853          * non-NULL to switch to passed-through or NULL for the
854          * other way around. We may not yet have an actual VCPU
855          * target here and we don't really care.
856          */
857         rc = irq_set_vcpu_affinity(host_irq, state);
858         if (rc) {
859                 pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
860                 return rc;
861         }
862
863         /*
864          * Mask and read state of IPI. We need to know if its P bit
865          * is set as that means it's potentially already using a
866          * queue entry in the target
867          */
868         prio = xive_lock_and_mask(xive, sb, state);
869         pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
870                  state->old_p, state->old_q);
871
872         /* Turn the IPI hard off */
873         xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
874
875         /* Grab info about irq */
876         state->pt_number = hw_irq;
877         state->pt_data = irq_data_get_irq_handler_data(host_data);
878
879         /*
880          * Configure the IRQ to match the existing configuration of
881          * the IPI if it was already targetted. Otherwise this will
882          * mask the interrupt in a lossy way (act_priority is 0xff)
883          * which is fine for a never started interrupt.
884          */
885         xive_native_configure_irq(hw_irq,
886                                   xive->vp_base + state->act_server,
887                                   state->act_priority, state->number);
888
889         /*
890          * We do an EOI to enable the interrupt (and retrigger if needed)
891          * if the guest has the interrupt unmasked and the P bit was *not*
892          * set in the IPI. If it was set, we know a slot may still be in
893          * use in the target queue thus we have to wait for a guest
894          * use in the target queue, thus we have to wait for a
895          * guest-originated EOI
896         if (prio != MASKED && !state->old_p)
897                 xive_vm_source_eoi(hw_irq, state->pt_data);
898
899         /* Clear old_p/old_q as they are no longer relevant */
900         state->old_p = state->old_q = false;
901
902         /* Restore guest prio (unlocks EOI) */
903         mb();
904         state->guest_priority = prio;
905         arch_spin_unlock(&sb->lock);
906
907         return 0;
908 }
909 EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
910
911 int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
912                            struct irq_desc *host_desc)
913 {
914         struct kvmppc_xive *xive = kvm->arch.xive;
915         struct kvmppc_xive_src_block *sb;
916         struct kvmppc_xive_irq_state *state;
917         unsigned int host_irq = irq_desc_get_irq(host_desc);
918         u16 idx;
919         u8 prio;
920         int rc;
921
922         if (!xive)
923                 return -ENODEV;
924
925         pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
926
927         sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
928         if (!sb)
929                 return -EINVAL;
930         state = &sb->irq_state[idx];
931
932         /*
933          * Mask and read state of IRQ. We need to know if its P bit
934          * is set as that means it's potentially already using a
935          * queue entry in the target
936          */
937         prio = xive_lock_and_mask(xive, sb, state);
938         pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
939                  state->old_p, state->old_q);
940
941         /*
942          * If old_p is set, the interrupt is pending, we switch it to
943          * PQ=11. This will force a resend in the host so the interrupt
944          * isn't lost to whatever host driver may pick it up
945          */
946         if (state->old_p)
947                 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
948
949         /* Release the passed-through interrupt to the host */
950         rc = irq_set_vcpu_affinity(host_irq, NULL);
951         if (rc) {
952                 pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
953                 return rc;
954         }
955
956         /* Forget about the IRQ */
957         state->pt_number = 0;
958         state->pt_data = NULL;
959
960         /* Reconfigure the IPI */
961         xive_native_configure_irq(state->ipi_number,
962                                   xive->vp_base + state->act_server,
963                                   state->act_priority, state->number);
964
965         /*
966          * If old_p is set (we have a queue entry potentially
967          * occupied) or the interrupt is masked, we set the IPI
968          * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
969          */
970         if (prio == MASKED || state->old_p)
971                 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
972         else
973                 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
974
975         /* Restore guest prio (unlocks EOI) */
976         mb();
977         state->guest_priority = prio;
978         arch_spin_unlock(&sb->lock);
979
980         return 0;
981 }
982 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
983
984 static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
985 {
986         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
987         struct kvm *kvm = vcpu->kvm;
988         struct kvmppc_xive *xive = kvm->arch.xive;
989         int i, j;
990
991         for (i = 0; i <= xive->max_sbid; i++) {
992                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
993
994                 if (!sb)
995                         continue;
996                 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
997                         struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
998
999                         if (!state->valid)
1000                                 continue;
1001                         if (state->act_priority == MASKED)
1002                                 continue;
1003                         if (state->act_server != xc->server_num)
1004                                 continue;
1005
1006                         /* Clean it up */
1007                         arch_spin_lock(&sb->lock);
1008                         state->act_priority = MASKED;
1009                         xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
1010                         xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
1011                         if (state->pt_number) {
1012                                 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
1013                                 xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
1014                         }
1015                         arch_spin_unlock(&sb->lock);
1016                 }
1017         }
1018 }
1019
1020 void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1021 {
1022         struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1023         struct kvmppc_xive *xive = xc->xive;
1024         int i;
1025
1026         pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1027
1028         /* Ensure no interrupt is still routed to that VP */
1029         xc->valid = false;
1030         kvmppc_xive_disable_vcpu_interrupts(vcpu);
1031
1032         /* Mask the VP IPI */
1033         xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1034
1035         /* Disable the VP */
1036         xive_native_disable_vp(xc->vp_id);
1037
1038         /* Free the queues & associated interrupts */
1039         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1040                 struct xive_q *q = &xc->queues[i];
1041
1042                 /* Free the escalation irq */
1043                 if (xc->esc_virq[i]) {
1044                         free_irq(xc->esc_virq[i], vcpu);
1045                         irq_dispose_mapping(xc->esc_virq[i]);
1046                         kfree(xc->esc_virq_names[i]);
1047                 }
1048                 /* Free the queue */
1049                 xive_native_disable_queue(xc->vp_id, q, i);
1050                 if (q->qpage) {
1051                         free_pages((unsigned long)q->qpage,
1052                                    xive->q_page_order);
1053                         q->qpage = NULL;
1054                 }
1055         }
1056
1057         /* Free the IPI */
1058         if (xc->vp_ipi) {
1059                 xive_cleanup_irq_data(&xc->vp_ipi_data);
1060                 xive_native_free_irq(xc->vp_ipi);
1061         }
1062         /* Free the VP */
1063         kfree(xc);
1064 }
1065
1066 int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1067                              struct kvm_vcpu *vcpu, u32 cpu)
1068 {
1069         struct kvmppc_xive *xive = dev->private;
1070         struct kvmppc_xive_vcpu *xc;
1071         int i, r = -EBUSY;
1072
1073         pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1074
1075         if (dev->ops != &kvm_xive_ops) {
1076                 pr_devel("Wrong ops !\n");
1077                 return -EPERM;
1078         }
1079         if (xive->kvm != vcpu->kvm)
1080                 return -EPERM;
1081         if (vcpu->arch.irq_type)
1082                 return -EBUSY;
1083         if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
1084                 pr_devel("Duplicate !\n");
1085                 return -EEXIST;
1086         }
1087         if (cpu >= KVM_MAX_VCPUS) {
1088                 pr_devel("Out of bounds !\n");
1089                 return -EINVAL;
1090         }
1091         xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1092         if (!xc)
1093                 return -ENOMEM;
1094
1095         /* We need to synchronize with queue provisioning */
1096         mutex_lock(&vcpu->kvm->lock);
1097         vcpu->arch.xive_vcpu = xc;
1098         xc->xive = xive;
1099         xc->vcpu = vcpu;
1100         xc->server_num = cpu;
1101         xc->vp_id = xive->vp_base + cpu;
1102         xc->mfrr = 0xff;
1103         xc->valid = true;
1104
1105         r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1106         if (r)
1107                 goto bail;
1108
1109         /* Configure VCPU fields for use by assembly push/pull */
1110         vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1111         vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1112
1113         /* Allocate IPI */
1114         xc->vp_ipi = xive_native_alloc_irq();
1115         if (!xc->vp_ipi) {
1116                 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1117                 r = -EIO;
1118                 goto bail;
1119         }
1120         pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1121
1122         r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1123         if (r)
1124                 goto bail;
1125
1126         /*
1127          * Enable the VP first as the single escalation mode will
1128          * affect escalation interrupt numbering
1129          */
1130         r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1131         if (r) {
1132                 pr_err("Failed to enable VP in OPAL, err %d\n", r);
1133                 goto bail;
1134         }
1135
1136         /*
1137          * Initialize queues. Initially we set them all for no queueing
1138          * and we enable escalation for queue 0 only which we'll use for
1139          * our mfrr change notifications. If the VCPU is hot-plugged, we
1140          * do, however, handle provisioning based on the existing "map"
1141          * of enabled queues.
1142          */
1143         for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1144                 struct xive_q *q = &xc->queues[i];
1145
1146                 /* Single escalation, no queue 7 */
1147                 if (i == 7 && xive->single_escalation)
1148                         break;
1149
1150                 /* Is queue already enabled ? Provision it */
1151                 if (xive->qmap & (1 << i)) {
1152                         r = xive_provision_queue(vcpu, i);
1153                         if (r == 0 && !xive->single_escalation)
1154                                 xive_attach_escalation(vcpu, i);
1155                         if (r)
1156                                 goto bail;
1157                 } else {
1158                         r = xive_native_configure_queue(xc->vp_id,
1159                                                         q, i, NULL, 0, true);
1160                         if (r) {
1161                                 pr_err("Failed to configure queue %d for VCPU %d\n",
1162                                        i, cpu);
1163                                 goto bail;
1164                         }
1165                 }
1166         }
1167
1168         /* If not done above, attach priority 0 escalation */
1169         r = xive_attach_escalation(vcpu, 0);
1170         if (r)
1171                 goto bail;
1172
1173         /* Route the IPI */
1174         r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1175         if (!r)
1176                 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1177
1178 bail:
1179         mutex_unlock(&vcpu->kvm->lock);
1180         if (r) {
1181                 kvmppc_xive_cleanup_vcpu(vcpu);
1182                 return r;
1183         }
1184
1185         vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1186         return 0;
1187 }
1188
1189 /*
1190  * Scanning of queues before/after migration save
1191  */
1192 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1193 {
1194         struct kvmppc_xive_src_block *sb;
1195         struct kvmppc_xive_irq_state *state;
1196         u16 idx;
1197
1198         sb = kvmppc_xive_find_source(xive, irq, &idx);
1199         if (!sb)
1200                 return;
1201
1202         state = &sb->irq_state[idx];
1203
1204         /* Some sanity checking */
1205         if (!state->valid) {
1206                 pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1207                 return;
1208         }
1209
1210         /*
1211          * If the interrupt is in a queue it should have P set.
1212          * We warn so that it gets reported. A backtrace isn't useful
1213          * so no need to use a WARN_ON.
1214          */
1215         if (!state->saved_p)
1216                 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1217
1218         /* Set flag */
1219         state->in_queue = true;
1220 }
1221
1222 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1223                                    struct kvmppc_xive_src_block *sb,
1224                                    u32 irq)
1225 {
1226         struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1227
1228         if (!state->valid)
1229                 return;
1230
1231         /* Mask and save state, this will also sync HW queues */
1232         state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1233
1234         /* Transfer P and Q */
1235         state->saved_p = state->old_p;
1236         state->saved_q = state->old_q;
1237
1238         /* Unlock */
1239         arch_spin_unlock(&sb->lock);
1240 }
1241
1242 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1243                                      struct kvmppc_xive_src_block *sb,
1244                                      u32 irq)
1245 {
1246         struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1247
1248         if (!state->valid)
1249                 return;
1250
1251         /*
1252          * Lock / exclude EOI (not technically necessary if the
1253          * guest isn't running concurrently). If this becomes a
1254          * performance issue we can probably remove the lock.
1255          */
1256         xive_lock_for_unmask(sb, state);
1257
1258         /* Restore mask/prio if it wasn't masked */
1259         if (state->saved_scan_prio != MASKED)
1260                 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1261
1262         /* Unlock */
1263         arch_spin_unlock(&sb->lock);
1264 }
1265
1266 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1267 {
1268         u32 idx = q->idx;
1269         u32 toggle = q->toggle;
1270         u32 irq;
1271
1272         do {
1273                 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1274                 if (irq > XICS_IPI)
1275                         xive_pre_save_set_queued(xive, irq);
1276         } while (irq);
1277 }
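/*
 * Note: the scan above works on local copies of q->idx and q->toggle
 * (the generation bit that flips each time the queue index wraps), so
 * it is non-destructive and leaves the guest-visible queue state
 * untouched.
 */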
1278
1279 static void xive_pre_save_scan(struct kvmppc_xive *xive)
1280 {
1281         struct kvm_vcpu *vcpu = NULL;
1282         int i, j;
1283
1284         /*
1285          * See comment in xive_get_source() about how this
1286          * works. Collect a stable state for all interrupts.
1287          */
1288         for (i = 0; i <= xive->max_sbid; i++) {
1289                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1290                 if (!sb)
1291                         continue;
1292                 for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1293                         xive_pre_save_mask_irq(xive, sb, j);
1294         }
1295
1296         /* Then scan the queues and update the "in_queue" flag */
1297         kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1298                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1299                 if (!xc)
1300                         continue;
1301                 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1302                         if (xc->queues[j].qpage)
1303                                 xive_pre_save_queue(xive, &xc->queues[j]);
1304                 }
1305         }
1306
1307         /* Finally restore interrupt states */
1308         for (i = 0; i <= xive->max_sbid; i++) {
1309                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1310                 if (!sb)
1311                         continue;
1312                 for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1313                         xive_pre_save_unmask_irq(xive, sb, j);
1314         }
1315 }
1316
1317 static void xive_post_save_scan(struct kvmppc_xive *xive)
1318 {
1319         u32 i, j;
1320
1321         /* Clear all the in_queue flags */
1322         for (i = 0; i <= xive->max_sbid; i++) {
1323                 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1324                 if (!sb)
1325                         continue;
1326                 for (j = 0;  j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1327                         sb->irq_state[j].in_queue = false;
1328         }
1329
1330         /* Next get_source() will do a new scan */
1331         xive->saved_src_count = 0;
1332 }
1333
1334 /*
1335  * This returns the source configuration and state to user space.
1336  */
1337 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1338 {
1339         struct kvmppc_xive_src_block *sb;
1340         struct kvmppc_xive_irq_state *state;
1341         u64 __user *ubufp = (u64 __user *) addr;
1342         u64 val, prio;
1343         u16 idx;
1344
1345         sb = kvmppc_xive_find_source(xive, irq, &idx);
1346         if (!sb)
1347                 return -ENOENT;
1348
1349         state = &sb->irq_state[idx];
1350
1351         if (!state->valid)
1352                 return -ENOENT;
1353
1354         pr_devel("get_source(%ld)...\n", irq);
1355
1356         /*
1357          * So to properly save the state into something that looks like a
1358          * XICS migration stream we cannot treat interrupts individually.
1359          *
1360          * We need, instead, mask them all (& save their previous PQ state)
1361          * to get a stable state in the HW, then sync them to ensure that
1362          * any interrupt that had already fired hits its queue, and finally
1363          * scan all the queues to collect which interrupts are still present
1364          * in the queues, so we can set the "pending" flag on them and
1365          * they can be resent on restore.
1366          *
1367          * So we do it all when the "first" interrupt gets saved: all the
1368          * state is collected at that point, and the rest of xive_get_source()
1369          * will merely collect and convert that state to the expected
1370          * userspace bit mask.
1371          */
1372         if (xive->saved_src_count == 0)
1373                 xive_pre_save_scan(xive);
1374         xive->saved_src_count++;
1375
1376         /* Convert saved state into something compatible with xics */
1377         val = state->act_server;
1378         prio = state->saved_scan_prio;
1379
1380         if (prio == MASKED) {
1381                 val |= KVM_XICS_MASKED;
1382                 prio = state->saved_priority;
1383         }
1384         val |= prio << KVM_XICS_PRIORITY_SHIFT;
1385         if (state->lsi) {
1386                 val |= KVM_XICS_LEVEL_SENSITIVE;
1387                 if (state->saved_p)
1388                         val |= KVM_XICS_PENDING;
1389         } else {
1390                 if (state->saved_p)
1391                         val |= KVM_XICS_PRESENTED;
1392
1393                 if (state->saved_q)
1394                         val |= KVM_XICS_QUEUED;
1395
1396                 /*
1397                  * We mark it pending (which will attempt a re-delivery)
1398                  * if we are in a queue *or* we were masked and had
1399                  * Q set, which is equivalent to the XICS "masked pending"
1400                  * state
1401                  */
1402                 if (state->in_queue || (prio == MASKED && state->saved_q))
1403                         val |= KVM_XICS_PENDING;
1404         }
1405
1406         /*
1407          * If that was the last interrupt saved, reset the
1408          * in_queue flags
1409          */
1410         if (xive->saved_src_count == xive->src_count)
1411                 xive_post_save_scan(xive);
1412
1413         /* Copy the result to userspace */
1414         if (put_user(val, ubufp))
1415                 return -EFAULT;
1416
1417         return 0;
1418 }
1419
1420 static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
1421                                                            int irq)
1422 {
1423         struct kvm *kvm = xive->kvm;
1424         struct kvmppc_xive_src_block *sb;
1425         int i, bid;
1426
1427         bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1428
1429         mutex_lock(&kvm->lock);
1430
1431         /* block already exists - somebody else got here first */
1432         if (xive->src_blocks[bid])
1433                 goto out;
1434
1435         /* Create the ICS */
1436         sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1437         if (!sb)
1438                 goto out;
1439
1440         sb->id = bid;
1441
1442         for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1443                 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1444                 sb->irq_state[i].guest_priority = MASKED;
1445                 sb->irq_state[i].saved_priority = MASKED;
1446                 sb->irq_state[i].act_priority = MASKED;
1447         }
1448         smp_wmb();
1449         xive->src_blocks[bid] = sb;
1450
1451         if (bid > xive->max_sbid)
1452                 xive->max_sbid = bid;
1453
1454 out:
1455         mutex_unlock(&kvm->lock);
1456         return xive->src_blocks[bid];
1457 }
1458
1459 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1460 {
1461         struct kvm *kvm = xive->kvm;
1462         struct kvm_vcpu *vcpu = NULL;
1463         int i;
1464
1465         kvm_for_each_vcpu(i, vcpu, kvm) {
1466                 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1467
1468                 if (!xc)
1469                         continue;
1470
1471                 if (xc->delayed_irq == irq) {
1472                         xc->delayed_irq = 0;
1473                         xive->delayed_irqs--;
1474                         return true;
1475                 }
1476         }
1477         return false;
1478 }
1479
1480 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1481 {
1482         struct kvmppc_xive_src_block *sb;
1483         struct kvmppc_xive_irq_state *state;
1484         u64 __user *ubufp = (u64 __user *) addr;
1485         u16 idx;
1486         u64 val;
1487         u8 act_prio, guest_prio;
1488         u32 server;
1489         int rc = 0;
1490
1491         if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1492                 return -ENOENT;
1493
1494         pr_devel("set_source(irq=0x%lx)\n", irq);
1495
1496         /* Find the source */
1497         sb = kvmppc_xive_find_source(xive, irq, &idx);
1498         if (!sb) {
1499                 pr_devel("No source, creating source block...\n");
1500                 sb = xive_create_src_block(xive, irq);
1501                 if (!sb) {
1502                         pr_devel("Failed to create block...\n");
1503                         return -ENOMEM;
1504                 }
1505         }
1506         state = &sb->irq_state[idx];
1507
1508         /* Read user passed data */
1509         if (get_user(val, ubufp)) {
1510                 pr_devel("fault getting user info !\n");
1511                 return -EFAULT;
1512         }
1513
1514         server = val & KVM_XICS_DESTINATION_MASK;
1515         guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1516
1517         pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
1518                  val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI!\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't, we
	 * leave the interrupt untargeted. This means that an interrupt
	 * can become "untargeted" across migration if it was masked
	 * by set_xive(), but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargeted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertised to a running guest yet.
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority, target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targeting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel("  Found delayed irq, forcing PENDING\n");
	}

	/* Clean up the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel("  LSI, asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
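	/*
	 * In other words (illustrative summary of the two tests below):
	 * P is restored iff the interrupt was PRESENTED and not PENDING,
	 * and Q is restored iff it was QUEUED or PENDING. E.g. a PENDING
	 * interrupt comes back as P=0/Q=1 and is re-sent on unmask.
	 */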
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel("  P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was masked, just save the guest priority;
	 * otherwise restore the guest priority, performing the
	 * appropriate state transition and re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel("  masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel("  unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}

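/*
 * Entry point for the in-kernel irq paths (e.g. irqfd): LSIs track
 * their assertion level, while for MSIs any set level simply fires
 * one trigger through the source's IPI.
 */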
int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly ... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}

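/*
 * The attribute groups below implement the userspace side of the
 * device. As an illustrative sketch (not part of this file), a VMM
 * holding a device fd for the XICS-on-XIVE device would restore a
 * source roughly like this, where "xive_fd", "irq_number" and
 * "state_word" are hypothetical names:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq_number,
 *		.addr  = (__u64)(uintptr_t)&state_word,
 *	};
 *	ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which lands in xive_set_attr() and from there in xive_set_source().
 */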
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}

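/*
 * Quiesce a hardware interrupt before giving it back: setting the ESB
 * to PQ=01 masks the source at the XIVE level, after which it can be
 * safely detached from its target and have its data freed.
 */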
static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}

static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, clean up too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}

static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}

static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;
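	/*
	 * Illustrative example (actual values depend on the host): if
	 * the default EQ shift were 16 (64kB queues) on a 64kB-page
	 * host, q_page_order would be 0 (one page per queue), while a
	 * 4kB-page host would need 2^4 pages per queue.
	 */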

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int j;	/* queue index, kept distinct from the vcpu iterator */

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			struct xive_q *q = &xc->queues[j];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[j])
				continue;

			seq_printf(m, " [q%d]: ", j);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x...\n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[j]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[j]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[j], pq, xd->eoi_page);
				seq_puts(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_puts(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}

static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

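/*
 * With debugfs mounted in its usual location, the dump above can be
 * read with something like (path shown for illustration only):
 *
 *	cat /sys/kernel/debug/powerpc/kvm-xive-<address>
 */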
static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};

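/*
 * Publish/retract the virtual-mode hcall handlers generated from
 * book3s_xive_template.c. The HV real-mode code falls back to these
 * (via the __xive_vm_* hooks) when an hcall cannot be completed in
 * real mode, so they must be cleared again when the module goes away.
 */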
void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}

void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}