drivers/irqchip/irq-gic-v4.c

/*
 * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/sched.h>

#include <linux/irqchip/arm-gic-v4.h>

/*
 * WARNING: The blurb below assumes that you understand the
 * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
 * translated into GICv4 commands. So it effectively targets at most
 * two individuals. You know who you are.
 *
 * The core GICv4 code is designed to *avoid* exposing too much of the
 * core GIC code (that would in turn leak into the hypervisor code),
 * and instead provide a hypervisor agnostic interface to the HW (of
 * course, the astute reader will quickly realize that hypervisor
 * agnostic actually means KVM-specific - what were you thinking?).
 *
 * In order to achieve a modicum of isolation, we try to hide most of
 * the GICv4 "stuff" behind normal irqchip operations:
 *
 * - Any guest-visible VLPI is backed by a Linux interrupt (and a
 *   physical LPI which gets unmapped when the guest maps the
 *   VLPI). This allows the same DevID/EventID pair to be either
 *   mapped to the LPI (host) or the VLPI (guest). Note that this is
 *   exclusive, and you cannot have both.
 *
 * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
 *
 * - Guest INT/CLEAR commands are implemented through
 *   irq_set_irqchip_state().
 *
 * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
 *   issuing an INV after changing a priority) gets shoved into the
 *   irq_set_vcpu_affinity() method. While this is quite horrible
 *   (let's face it, this is the irqchip version of an ioctl), it
 *   confines the crap to a single location. And map/unmap really is
 *   about setting the affinity of a VLPI to a vcpu, so only INV is
 *   majorly out of place. So there.
 *
 * A number of commands are simply not provided by this interface, as
 * they do not make direct sense. For example, MAPD is purely local to
 * the virtual ITS (because it references a virtual device, and the
 * physical ITS is still very much in charge of the physical
 * device). Same goes for things like MAPC (the physical ITS deals
 * with the actual vPE affinity, and not the braindead concept of
 * collection). SYNC is not provided either, as each and every command
 * is followed by a VSYNC. This could be relaxed in the future, should
 * this be seen as a bottleneck (yes, this means *never*).
 *
 * But handling VLPIs is only one side of the job of the GICv4
 * code. The other (darker) side is to take care of the doorbell
 * interrupts which are delivered when a VLPI targeting a non-running
 * vcpu is being made pending.
 *
 * The choice made here is that each vcpu (VPE in old northern GICv4
 * dialect) gets a single doorbell LPI, no matter how many interrupts
 * are targeting it. This has a nice property, which is that the
 * interrupt becomes a handle for the VPE, and that the hypervisor
 * code can manipulate it through the normal interrupt API:
 *
 * - VMs (or rather the VM abstraction that matters to the GIC)
 *   contain an irq domain where each interrupt maps to a VPE. In
 *   turn, this domain sits on top of the normal LPI allocator, and a
 *   specially crafted irq_chip implementation.
 *
 * - mask/unmask do what is expected on the doorbell interrupt.
 *
 * - irq_set_affinity is used to move a VPE from one redistributor to
 *   another.
 *
 * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
 *   creating a new sub-API, namely scheduling/descheduling a VPE
 *   (which involves programming GICR_V{PROP,PEND}BASER) and
 *   performing INVALL operations.
 */

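/*
 * As an illustration only (not lifted from any in-tree user), the
 * hypervisor-side flow below sketches how the calls exported by this
 * file are meant to be strung together. The vm/vpe/host_irq/map
 * variables are made up for the example; see the declarations in
 * <linux/irqchip/arm-gic-v4.h> for the real types and the KVM vgic
 * code for an actual user.
 *
 *	// Once per VM: allocate one doorbell interrupt per vcpu.
 *	err = its_alloc_vcpu_irqs(&vm);
 *
 *	// When a vcpu starts running here (with preemption disabled):
 *	err = its_schedule_vpe(vpe, true);
 *
 *	// Forward a host LPI to the guest as a VLPI:
 *	err = its_map_vlpi(host_irq, &map);
 *
 *	// When the vcpu stops running:
 *	err = its_schedule_vpe(vpe, false);
 *
 *	// Teardown:
 *	its_unmap_vlpi(host_irq);
 *	its_free_vcpu_irqs(&vm);
 */
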
static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;

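/*
 * Allocate the per-VM VPE irq domain and one doorbell interrupt per
 * vcpu. On success, vm->vpes[i]->irq holds the Linux interrupt acting
 * as that VPE's doorbell; on failure, anything partially set up is
 * torn down and -ENOMEM is returned.
 */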
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
	int vpe_base_irq, i;

	vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
						      task_pid_nr(current));
	if (!vm->fwnode)
		goto err;

	vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
						 vm->fwnode, vpe_domain_ops,
						 vm);
	if (!vm->domain)
		goto err;

	for (i = 0; i < vm->nr_vpes; i++) {
		vm->vpes[i]->its_vm = vm;
		vm->vpes[i]->idai = true;
	}

	vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
					       NUMA_NO_NODE, vm,
					       false, NULL);
	if (vpe_base_irq <= 0)
		goto err;

	for (i = 0; i < vm->nr_vpes; i++)
		vm->vpes[i]->irq = vpe_base_irq + i;

	return 0;

err:
	if (vm->domain)
		irq_domain_remove(vm->domain);
	if (vm->fwnode)
		irq_domain_free_fwnode(vm->fwnode);

	return -ENOMEM;
}

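/* Undo its_alloc_vcpu_irqs(): free the doorbells, the domain and the fwnode. */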
void its_free_vcpu_irqs(struct its_vm *vm)
{
	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
	irq_domain_remove(vm->domain);
	irq_domain_free_fwnode(vm->fwnode);
}

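/*
 * VPE-level commands are funnelled through irq_set_vcpu_affinity() on
 * the doorbell interrupt, as described in the blurb above.
 */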
static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
{
	return irq_set_vcpu_affinity(vpe->irq, info);
}

int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
	struct its_cmd_info info;

	WARN_ON(preemptible());

	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;

	return its_send_vpe_cmd(vpe, &info);
}

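/* Invalidate any cached configuration for the VLPIs targeting this VPE. */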
int its_invall_vpe(struct its_vpe *vpe)
{
	struct its_cmd_info info = {
		.cmd_type = INVALL_VPE,
	};

	return its_send_vpe_cmd(vpe, &info);
}

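/*
 * Turn the host LPI backing @irq into the VLPI described by @map. From
 * this point on the host no longer sees this interrupt fire, so lazy
 * masking has to be disabled before handing it over.
 */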
int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
	struct its_cmd_info info = {
		.cmd_type = MAP_VLPI,
		{
			.map = map,
		},
	};
	int ret;

	/*
	 * The host will never see that interrupt firing again, so it
	 * is vital that we don't do any lazy masking.
	 */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	ret = irq_set_vcpu_affinity(irq, &info);
	if (ret)
		irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);

	return ret;
}

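/* Retrieve the current VLPI mapping of @irq into @map. */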
int its_get_vlpi(int irq, struct its_vlpi_map *map)
{
	struct its_cmd_info info = {
		.cmd_type = GET_VLPI,
		{
			.map = map,
		},
	};

	return irq_set_vcpu_affinity(irq, &info);
}

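/* Give @irq back to the host: drop the VLPI mapping and re-allow lazy masking. */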
int its_unmap_vlpi(int irq)
{
	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return irq_set_vcpu_affinity(irq, NULL);
}

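/*
 * Update the configuration byte (priority/enable) of a VLPI, optionally
 * followed by an INV so that the change takes effect immediately.
 */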
int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
	struct its_cmd_info info = {
		.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
		{
			.config = config,
		},
	};

	return irq_set_vcpu_affinity(irq, &info);
}

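/*
 * Register the VPE irq domain and its ops, as handed over by the ITS
 * driver when it detects GICv4 support; its_alloc_vcpu_irqs() builds
 * the per-VM domains on top of this one.
 */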
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
{
	if (domain) {
		pr_info("ITS: Enabling GICv4 support\n");
		gic_domain = domain;
		vpe_domain_ops = ops;
		return 0;
	}

	pr_err("ITS: No GICv4 VPE domain allocated\n");
	return -ENODEV;
}