/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
        .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock   must be taken with IRQs disabled
 *         kvm->lpi_list_lock     must be taken with IRQs disabled
 *           vgic_irq->irq_lock   must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */

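/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * taking two VCPUs' ap_list_locks while respecting the ordering rule above.
 * The helper name is hypothetical; vgic_prune_ap_list() below follows the
 * same pattern when it migrates an interrupt between VCPUs.
 */
static void __maybe_unused vgic_lock_two_ap_lists(struct kvm_vcpu *a,
                                                  struct kvm_vcpu *b)
{
        struct kvm_vcpu *first = (a->vcpu_id < b->vcpu_id) ? a : b;
        struct kvm_vcpu *second = (a->vcpu_id < b->vcpu_id) ? b : a;

        /* Lowest numbered VCPU first; the nested class keeps lockdep happy */
        spin_lock(&first->arch.vgic_cpu.ap_list_lock);
        spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
                         SINGLE_DEPTH_NESTING);
}
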
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
                        continue;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() later once it's finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);
                goto out_unlock;
        }
        irq = NULL;

out_unlock:
        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
{
        /*
         * SGIs and PPIs: note that array_index_nospec() takes the array
         * size, hence the "+ 1" for an inclusive upper bound.
         */
        if (intid <= VGIC_MAX_PRIVATE) {
                intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
                return &vcpu->arch.vgic_cpu.private_irqs[intid];
        }

        /* SPIs */
        if (intid <= VGIC_MAX_SPI) {
                intid = array_index_nospec(intid, VGIC_MAX_SPI + 1);
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
        }

        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        WARN(1, "Looking up struct vgic_irq for reserved INTID");
        return NULL;
}

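/*
 * Illustrative sketch (editorial addition): the get/put protocol that every
 * vgic_get_irq() caller must follow. The helper is hypothetical; INTID 32 is
 * simply the first SPI.
 */
static void __maybe_unused vgic_refcount_example(struct kvm *kvm,
                                                 struct kvm_vcpu *vcpu)
{
        struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, 32);
        unsigned long flags;

        if (!irq)
                return;

        spin_lock_irqsave(&irq->irq_lock, flags);
        /* ... inspect or modify the IRQ state under irq_lock ... */
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* Balance the reference taken by vgic_get_irq() */
        vgic_put_irq(kvm, irq);
}
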
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned long flags;

        if (irq->intid < VGIC_MIN_LPI)
                return;

        spin_lock_irqsave(&dist->lpi_list_lock, flags);
        if (!kref_put(&irq->refcount, vgic_irq_release)) {
                spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
                return;
        }

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        kfree(irq);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
        bool line_level;

        BUG_ON(!irq->hw);

        if (irq->get_input_level)
                return irq->get_input_level(irq->intid);

        WARN_ON(irq_get_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      &line_level));
        return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
        BUG_ON(!irq->hw);
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_ACTIVE,
                                      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq: The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If neither active nor pending and enabled, then this IRQ should not
         * be queued to any VCPU.
         */
        return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        spin_lock(&irqa->irq_lock);
        spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irq_is_pending(irqa);
        pendb = irqb->enabled && irq_is_pending(irqb);

        if (!penda || !pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        spin_unlock(&irqb->irq_lock);
        spin_unlock(&irqa->irq_lock);
        return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

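/*
 * Worked example (editorial addition): with one active IRQ A and two
 * pending-and-enabled IRQs B (priority 0x80) and C (priority 0xa0, i.e.
 * a lower GIC priority), vgic_irq_cmp() sorts the ap_list as A, B, C:
 * the active interrupt first, then the pending ones by ascending
 * priority value.
 */
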
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;

        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}

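/*
 * Worked example (editorial addition): for a level-sensitive line that is
 * currently low (line_level == false), level == true is a valid injection
 * because the line changes state, while level == false is filtered out.
 * For an edge-triggered line only level == true (a rising edge) is valid,
 * and any injection from a caller other than the registered owner is
 * rejected outright.
 */
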
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags)
{
        struct kvm_vcpu *vcpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                /*
                 * We have to kick the VCPU here, because we could be
                 * queueing an edge-triggered interrupt for which we
                 * get no EOI maintenance interrupt. In that case,
                 * while the IRQ is already on the VCPU's AP list, the
                 * VCPU could have EOI'ed the original interrupt and
                 * won't see this one until it exits for some other
                 * reason.
                 */
                if (vcpu) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* someone can do stuff here, which we re-check below */

        spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs?
         *
         * There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         *
         * In both cases, drop the locks and retry.
         */

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                spin_unlock(&irq->irq_lock);
                spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

                spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /*
         * Grab a reference to the irq to reflect the fact that it is
         * now in the ap_list.
         */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        spin_unlock(&irq->irq_lock);
        spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:   The VM structure pointer
 * @cpuid: The CPU for PPIs
 * @intid: The INTID to inject a new state to.
 * @level: Edge-triggered:  true:  to trigger the interrupt
 *                          false: to ignore the call
 *         Level-sensitive: true:  raise the input signal
 *                          false: lower the input signal
 * @owner: The opaque pointer to the owner of the IRQ being raised to verify
 *         that the caller is allowed to inject this IRQ. Userspace
 *         injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level, void *owner)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        unsigned long flags;
        int ret;

        trace_vgic_update_irq_pending(cpuid, intid, level);

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        spin_lock_irqsave(&irq->irq_lock, flags);

        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}

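/*
 * Illustrative sketch (editorial addition): raising and then lowering a
 * level-sensitive interrupt line from an in-kernel device. The SPI number
 * is hypothetical; userspace injections would pass owner == NULL instead.
 */
static void __maybe_unused example_pulse_level_irq(struct kvm *kvm, void *owner)
{
        unsigned int intid = 40;        /* hypothetical SPI */

        /* Raise the input signal; this queues the IRQ if it becomes pending */
        WARN_ON(kvm_vgic_inject_irq(kvm, 0, intid, true, owner));

        /* ... the guest services the device and the condition clears ... */

        /* Lower the input signal again */
        WARN_ON(kvm_vgic_inject_irq(kvm, 0, intid, false, owner));
}
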
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                            unsigned int host_irq,
                            bool (*get_input_level)(int vintid))
{
        struct irq_desc *desc;
        struct irq_data *data;

        /*
         * Find the physical IRQ number corresponding to @host_irq
         */
        desc = irq_to_desc(host_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }
        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        irq->hw = true;
        irq->host_irq = host_irq;
        irq->hwintid = data->hwirq;
        irq->get_input_level = get_input_level;
        return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
        irq->hw = false;
        irq->hwintid = 0;
        irq->get_input_level = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                          u32 vintid, bool (*get_input_level)(int vintid))
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;
        int ret;

        BUG_ON(!irq);

        spin_lock_irqsave(&irq->irq_lock, flags);
        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return ret;
}

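/*
 * Illustrative sketch (editorial addition): forwarding a physical interrupt
 * to a virtual one, in the style of the arch timer. The wrapper and all
 * values passed through it (host_irq, vintid, the input-level callback) are
 * placeholders, not an API defined by this file.
 */
static int __maybe_unused example_forward_hw_irq(struct kvm_vcpu *vcpu,
                                                 unsigned int host_irq,
                                                 u32 vintid,
                                                 bool (*level_cb)(int vintid))
{
        /*
         * After mapping, deactivation of the virtual interrupt propagates
         * to the physical one, and vgic_get_phys_line_level() can use
         * level_cb to sample the line instead of the irqchip pending state.
         */
        return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, level_cb);
}
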
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;

        if (!irq->hw)
                goto out;

        spin_lock_irqsave(&irq->irq_lock, flags);
        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
        spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
        vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        BUG_ON(!irq);

        spin_lock_irqsave(&irq->irq_lock, flags);
        kvm_vgic_unmap_irq(irq);
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:  Pointer to the VCPU (used for PPIs)
 * @intid: The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner: Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret = 0;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        /* SGIs and LPIs cannot be wired up to any device */
        if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
                return -EINVAL;

        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
        spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
        spin_unlock_irqrestore(&irq->irq_lock, flags);

        return ret;
}

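/*
 * Illustrative sketch (editorial addition): an in-kernel device claiming a
 * PPI before injecting it, so that later kvm_vgic_inject_irq() calls with
 * the same opaque owner pointer pass the ownership check. The PPI number is
 * hypothetical.
 */
static int __maybe_unused example_claim_and_raise_ppi(struct kvm_vcpu *vcpu,
                                                      void *dev)
{
        unsigned int intid = 27;        /* hypothetical PPI */
        int ret;

        ret = kvm_vgic_set_owner(vcpu, intid, dev);
        if (ret)        /* -EEXIST: another device already owns this line */
                return ret;

        return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, intid, true, dev);
}
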
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
        unsigned long flags;

retry:
        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
                bool target_vcpu_needs_kick = false;

                spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);

                        /*
                         * This vgic_put_irq call matches the
                         * vgic_get_irq_kref in vgic_queue_irq_unlock,
                         * where we added the LPI to the ap_list. As
                         * we remove the irq from the list, we also
                         * drop the refcount.
                         */
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                spin_unlock(&irq->irq_lock);
                spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                        target_vcpu_needs_kick = true;
                }

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);

                if (target_vcpu_needs_kick) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
                        kvm_vcpu_kick(target_vcpu);
                }

                goto retry;
        }

        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_fold_lr_state(vcpu);
        else
                vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_clear_lr(vcpu, lr);
        else
                vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_underflow(vcpu);
        else
                vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
                                 bool *multi_sgi)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        *multi_sgi = false;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                int w;

                spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                w = vgic_irq_get_lr_count(irq);
                spin_unlock(&irq->irq_lock);

                count += w;
                *multi_sgi |= (w > 1);
        }
        return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count;
        bool multi_sgi;
        u8 prio = 0xff;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        count = compute_ap_list_depth(vcpu, &multi_sgi);
        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
                vgic_sort_ap_list(vcpu);

        count = 0;

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);

                /*
                 * If we have multi-SGIs in the pipeline, we need to
                 * guarantee that they are all seen before any IRQ of
                 * lower priority. In that case, we need to filter out
                 * these interrupts by exiting early. This is easy as
                 * the AP list has been sorted already.
                 */
                if (multi_sgi && irq->priority > prio) {
                        spin_unlock(&irq->irq_lock);
                        break;
                }

                if (likely(vgic_target_oracle(irq) == vcpu)) {
                        vgic_populate_lr(vcpu, irq, count++);

                        if (irq->source)
                                prio = irq->priority;
                }

                spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
                                          &vgic_cpu->ap_list_head))
                                vgic_set_underflow(vcpu);
                        break;
                }
        }

        vcpu->arch.vgic_cpu.used_lrs = count;

        /* Nuke remaining LRs */
        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
                vgic_clear_lr(vcpu, count);
}

static inline bool can_access_vgic_from_kernel(void)
{
        /*
         * GICv2 can always be accessed from the kernel because it is
         * memory-mapped, and VHE systems can access GICv3 EL2 system
         * registers.
         */
        return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_save_state(vcpu);
        else
                __vgic_v3_save_state(vcpu);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        WARN_ON(vgic_v4_sync_hwstate(vcpu));

        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        if (can_access_vgic_from_kernel())
                vgic_save_state(vcpu);

        if (vgic_cpu->used_lrs)
                vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_restore_state(vcpu);
        else
                __vgic_v3_restore_state(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        WARN_ON(vgic_v4_flush_hwstate(vcpu));

        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
         * taking any lock. There is a potential race with someone injecting
         * interrupts to the VCPU, but it is a benign race as the VCPU will
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanism doesn't change
         * this.
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_load(vcpu);
        else
                vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_put(vcpu);
        else
                vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
        unsigned long flags;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
                return true;

        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled;
                spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

        return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu)) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
        }
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        bool map_is_active;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return false;

        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
}