/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

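/*
 * The "misc" range covers GICD_CTLR, GICD_TYPER and GICD_IIDR. For
 * GICD_TYPER, ITLinesNumber (bits [4:0]) encodes the number of
 * supported interrupts divided by 32, minus one, and CPUNumber
 * (bits [7:5]) encodes the number of online VCPUs, minus one.
 */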
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

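/*
 * GICD_SGIR layout: bits [3:0] hold the SGI number, bits [23:16] the
 * CPU target list and bits [25:24] the target list filter. Filter 0b01
 * means "all but self", 0b10 means "self only" and 0b11 is reserved.
 */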
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
	unsigned long flags;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

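/*
 * GICD_ITARGETSR holds one byte per interrupt, giving the bitmap of
 * target CPUs. Accesses may be 8-bit or 32-bit wide, so up to @len
 * bytes (i.e. interrupts) are handled per call.
 */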
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

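/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR expose one byte per SGI, each bit
 * naming a source CPU with that SGI pending. Writes to the clear
 * register drop source bits; writes to the set register add them and
 * may newly queue the interrupt.
 */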
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only; there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * the priority mask to userspace using the lower bits in
		 * the unsigned long.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * the priority mask to userspace using the lower bits in
		 * the unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

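/*
 * GICC_APRn holds the active priorities. GICv2 hardware provides a
 * single 32-bit register, while on a GICv3 host the state of a
 * memory-mapped (GICv2) guest lives in ICH_AP1Rn. array_index_nospec()
 * clamps the index so a mispredicted bounds check cannot speculatively
 * read outside the vgic_ap1r[] array.
 */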
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;
		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}

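/*
 * Distributor register map. The per-IRQ entries carry two extra
 * handler slots for userspace accesses; here only the active-set/clear
 * registers install dedicated uaccess writers, the rest fall back to
 * the guest-facing handlers.
 */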
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

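/*
 * Wire up the distributor region table and return the size of the
 * MMIO window: the GICv2 distributor occupies 4K of guest address
 * space.
 */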
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

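/*
 * Check whether a userspace KVM_{GET,SET}_DEVICE_ATTR access targets
 * an implemented register: parse the attribute into a (vcpu, offset)
 * pair, pick the matching register table and look the offset up there.
 */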
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

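/*
 * Userspace access helpers: build a transient vgic_io_device around
 * the relevant register table and dispatch through vgic_uaccess(), so
 * the same handlers serve both guest traps and the device attribute
 * API.
 */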
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}