/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"
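/*
 * Trap and feature flags, all off by default; they are set from the
 * kernel command line via the early_params at the bottom of this file.
 */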
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;
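/*
 * Ask for a maintenance interrupt on LR underflow, so that we get a
 * chance to feed further pending interrupts into the LRs.
 */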
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}
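/*
 * An empty (invalid state) LR with the EOI bit set and no HW backing
 * means the guest EOI'ed a software-emulated interrupt, which raises an
 * EOI maintenance interrupt.
 */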
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}
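/*
 * Fold the state the hardware left in the LRs back into the emulated
 * distributor state, dropping the references taken when the LRs were
 * populated.
 */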
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;
	unsigned long flags;

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid;
		struct vgic_irq *irq;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock_irqsave(&irq->irq_lock, flags);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->pending_latch = false;
		}

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending_latch = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq_is_pending(irq))
			val &= ~ICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
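/*
 * Pack the architecture-independent struct vgic_vmcr into the shadow
 * ICH_VMCR_EL2 value that is loaded into the CPU interface.
 */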
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
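/* Reset and enable the virtual CPU interface at vcpu init time. */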
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}
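/*
 * Synchronize the pending state of an LPI from the guest's pending
 * table: latch and queue the interrupt if its bit is set, then clear
 * the consumed bit in guest memory.
 */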
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: the KVM guest
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int last_byte_offset = -1;
	struct vgic_irq *irq;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		/* Only re-read the byte if we moved on to a new one */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_byte_offset = byte_offset;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_redist_base) &&
	    d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	/* Both base addresses must be set to check if they overlap */
	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(d->vgic_redist_base))
		return true;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}
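/*
 * The final step before running vcpus: validate the address-space
 * layout set by userspace and register the distributor MMIO device.
 */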
int vgic_v3_map_resources(struct kvm *kvm)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
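/*
 * Command-line processing for the trap and GICv4 flags declared at the
 * top of this file.
 */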
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description gathered by the GIC driver
 *
 * Returns 0 if a GICv3 has been found, an error code otherwise.
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&info->vcpu),
			PAGE_SIZE);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	/* Cavium ThunderX erratum 30115: trap Group-0/1 sysreg accesses */
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}
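/*
 * With SRE set, VMCR_EL2 is context-switched here on vcpu load/put
 * rather than in the world switch.
 */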
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
}
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
}