Commit | Line | Data |
---|---|---|
f68d2b1b MZ |
1 | /* |
2 | * Copyright (C) 2012-2015 - ARM Ltd | |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | */ | |
17 | ||
18 | #include <linux/compiler.h> | |
19 | #include <linux/irqchip/arm-gic-v3.h> | |
20 | #include <linux/kvm_host.h> | |
21 | ||
59da1cbf | 22 | #include <asm/kvm_emulate.h> |
13720a56 | 23 | #include <asm/kvm_hyp.h> |
f68d2b1b MZ |
24 | |
25 | #define vtr_to_max_lr_idx(v) ((v) & 0xf) | |
d68356cc | 26 | #define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1) |
132a324a | 27 | #define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5)) |
f68d2b1b | 28 | |
1b8e83c0 MZ |
/*
 * Read the value of a single list register, ICH_LR<n>_EL2.
 *
 * The LR system registers are not index-addressable, so we must
 * dispatch on the (masked) index. Masking with 0xf makes the switch
 * exhaustive for 0..15, hence the trailing unreachable().
 */
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}
68 | ||
/*
 * Write @val to a single list register, ICH_LR<n>_EL2.
 *
 * Counterpart of __gic_v3_get_lr(): the LRs are not index-addressable,
 * so dispatch on the (masked) index. Indices outside 0..15 cannot
 * occur thanks to the 0xf mask.
 */
static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}
122 | ||
63000dd8 MZ |
/*
 * Write @val to the Group-0 active priority register ICH_AP0R<n>_EL2.
 * Indices outside 0..3 are silently ignored (no such register exists).
 */
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}
140 | ||
/*
 * Write @val to the Group-1 active priority register ICH_AP1R<n>_EL2.
 * Indices outside 0..3 are silently ignored (no such register exists).
 */
static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}
158 | ||
/*
 * Read the Group-0 active priority register ICH_AP0R<n>_EL2.
 * Callers must pass n in 0..3; anything else hits unreachable().
 */
static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
182 | ||
/*
 * Read the Group-1 active priority register ICH_AP1R<n>_EL2.
 * Callers must pass n in 0..3; anything else hits unreachable().
 */
static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}
206 | ||
f68d2b1b MZ |
/*
 * Save the guest's GICv3 CPU interface state on vcpu exit: VMCR (when
 * running a GICv2-on-GICv3 guest), the list registers, and the active
 * priority registers. Finally re-enable the EL2 system register
 * interface (ICC_SRE_EL2.Enable) for the host.
 */
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre) {
		dsb(sy);
		isb();
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	if (used_lrs) {
		int i;
		u32 nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		/* Disable the vCPU interface while we save its state */
		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		for (i = 0; i < used_lrs; i++) {
			/* An empty LR (per ELRSR) carries no state worth reading back */
			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		/* Only save the AP registers the HW implements (5-7 preemption bits) */
		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
			/* Fall through */
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
			/* Fall through */
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}
	} else {
		/*
		 * ICH_HCR_EL2 was written on entry when trapping sysregs
		 * or using GICv4 (VLPIs); clear it again on the way out.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
		    cpu_if->its_vpe.its_vm)
			write_gicreg(0, ICH_HCR_EL2);

		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}
286 | ||
/*
 * Restore the guest's GICv3 CPU interface state on vcpu entry: the
 * active priority registers, list registers and (for GICv2-on-GICv3
 * guests) VMCR, and finally disable the EL2 system register interface
 * so the guest cannot touch the GIC system registers.
 */
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 val;
	u32 nr_pre_bits;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
	}

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		/* Only restore the AP registers the HW implements */
		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
			/* Fall through */
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
			/* Fall through */
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	} else {
		/*
		 * If we need to trap system registers, we must write
		 * ICH_HCR_EL2 anyway, even if no interrupts are being
		 * injected. Same thing if GICv4 is used, as VLPI
		 * delivery is gated by ICH_HCR_EL2.En.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
		    cpu_if->its_vpe.its_vm)
			write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensure the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}
367 | ||
0d98d00b MZ |
/*
 * Zero every list register the hardware implements.
 * The number of LRs is discovered from ICH_VTR_EL2.ListRegs.
 */
void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}
376 | ||
/* Return the raw ICH_VTR_EL2 value (VGIC type/capability register). */
u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}
328e5664 CD |
381 | |
/* Return the current ICH_VMCR_EL2 (virtual machine control register). */
u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}
386 | ||
/* Program ICH_VMCR_EL2 with @vmcr. */
void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}
59da1cbf MZ |
391 | |
392 | #ifdef CONFIG_ARM64 | |
393 | ||
d70c7b31 MZ |
/*
 * Minimum BPR value the hardware supports, derived from the number
 * of implemented preemption bits in ICH_VTR_EL2.
 */
static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
399 | ||
132a324a MZ |
/*
 * Decode which interrupt group the trapped access targets from the
 * CRm field of the ESR: CRm == 8 encodes the Group-0 accessors
 * (e.g. ICC_IAR0_EL1), everything else is Group 1. Returns 0 for
 * Group 0, 1 for Group 1.
 */
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}
407 | ||
408 | #define GICv3_IDLE_PRIORITY 0xff | |
409 | ||
/*
 * Scan the in-use list registers for the highest-priority pending
 * interrupt whose group is enabled in @vmcr.
 *
 * On success, returns the LR index and stores its value in *lr_val.
 * Otherwise returns -1 and stores ICC_IAR1_EL1_SPURIOUS in *lr_val,
 * so the caller can always use *lr_val.
 */
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? (lower value == higher priority) */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}
449 | ||
b6f49035 MZ |
/*
 * Find the list register holding interrupt @intid in the active state.
 * Returns the LR index and its value in *lr_val, or -1 with
 * ICC_IAR1_EL1_SPURIOUS in *lr_val when no matching active LR exists.
 */
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}
469 | ||
132a324a MZ |
/*
 * Compute the highest active priority across both groups by finding
 * the first set bit in the (OR-combined) ICH_AP0Rn/ICH_AP1Rn
 * registers, rescaled to an 8-bit priority via the minimum BPR.
 * Returns GICv3_IDLE_PRIORITY (0xff) when nothing is active.
 */
static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
501 | ||
d70c7b31 MZ |
/* Extract the BPR0 field from the shadow ICH_VMCR_EL2 value. */
static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}
506 | ||
507 | static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr) | |
508 | { | |
509 | unsigned int bpr; | |
510 | ||
511 | if (vmcr & ICH_VMCR_CBPR_MASK) { | |
512 | bpr = __vgic_v3_get_bpr0(vmcr); | |
513 | if (bpr < 7) | |
514 | bpr++; | |
515 | } else { | |
516 | bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; | |
517 | } | |
518 | ||
519 | return bpr; | |
520 | } | |
521 | ||
132a324a MZ |
/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 *
 * @grp selects which group's binary point applies: 0 uses BPR0 + 1
 * (per the architected Group-0/Group-1 BPR relationship), non-zero
 * uses the effective BPR1.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	/* Keep only the group-priority bits selected by the BPR */
	return pri & (GENMASK(7, 0) << bpr);
}
537 | ||
/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minumal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 *
 * Sets the active-priority bit for @pri in the ICH_AP0Rn (grp == 0)
 * or ICH_AP1Rn (grp != 0) register bank.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	/* Preemption level, rescaled to an AP register bit index */
	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}
562 | ||
b6f49035 MZ |
/*
 * Drop the highest active priority: find the lowest set bit across
 * the AP0Rn/AP1Rn banks (which is the highest priority), clear it,
 * and return that priority rescaled to 8 bits. Returns
 * GICv3_IDLE_PRIORITY when no priority is active.
 */
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}
600 | ||
132a324a MZ |
/*
 * Emulate a guest read of ICC_IAR{0,1}_EL1: acknowledge the
 * highest-priority pending interrupt of the trapped group, move it to
 * the active state (except for LPIs, which have no active state),
 * record its active priority and return its INTID in the guest's Xrt.
 * Any failed check yields ICC_IAR1_EL1_SPURIOUS.
 */
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	/* The winning LR must belong to the group being acknowledged */
	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	/* Masked by the priority mask register? */
	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	/* Preempted by an already-active, higher priority? */
	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
636 | ||
b6f49035 MZ |
/*
 * Deactivate the interrupt held in list register @lr: clear its
 * active bit and, for HW-mapped interrupts, forward the deactivation
 * of the physical interrupt via ICC_DIR_EL1.
 */
static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}
649 | ||
/*
 * Increment ICH_HCR_EL2.EOIcount, which records an EOI for which no
 * matching active LR was found.
 */
static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}
658 | ||
40228ba5 MZ |
/*
 * Emulate a guest write to ICC_DIR_EL1: explicitly deactivate the
 * interrupt whose INTID is in Xrt. Only meaningful with EOImode == 1;
 * LPIs have no active state to clear. A write with no matching
 * active LR only bumps EOIcount.
 */
static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}
682 | ||
b6f49035 MZ |
/*
 * Emulate a guest write to ICC_EOIR{0,1}_EL1: unconditionally drop
 * the highest active priority, then (with EOImode == 0 and a non-LPI
 * INTID) deactivate the matching interrupt, provided the group and
 * preemption priority agree with what was acknowledged.
 */
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}
719 | ||
fbc48a00 MZ |
/* Emulate a read of ICC_IGRPEN0_EL1: bit 0 = Group-0 enable. */
static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

/* Emulate a read of ICC_IGRPEN1_EL1: bit 0 = Group-1 enable. */
static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

/* Emulate a write to ICC_IGRPEN0_EL1: update VMCR.VENG0 from Xrt bit 0. */
static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

/* Emulate a write to ICC_IGRPEN1_EL1: update VMCR.VENG1 from Xrt bit 0. */
static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}
753 | ||
423de85a MZ |
/* Emulate a read of ICC_BPR0_EL1 from the shadow VMCR. */
static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

/* Emulate a read of ICC_BPR1_EL1 (honouring VMCR.CBPR aliasing). */
static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}
763 | ||
423de85a MZ |
/*
 * Emulate a write to ICC_BPR0_EL1: clamp the value to the minimum
 * BPR0 the HW supports (one less than the generic minimum, per the
 * architected BPR0/BPR1 relationship) and fold it into VMCR.
 */
static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}
780 | ||
d70c7b31 MZ |
/*
 * Emulate a write to ICC_BPR1_EL1: ignored entirely when VMCR.CBPR is
 * set (BPR1 then aliases BPR0); otherwise clamp to the minimum BPR
 * and fold into VMCR.
 */
static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}
800 | ||
f9e7449c MZ |
/*
 * Read AP register @n of whichever group (AP0Rn/AP1Rn) the trapped
 * access targets, and place the value in the guest's Xrt.
 */
static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

/*
 * Write the guest's Xrt value to AP register @n of whichever group
 * (AP0Rn/AP1Rn) the trapped access targets.
 */
static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}
822 | ||
/*
 * Fixed-index wrappers around __vgic_v3_{read,write}_apxrn, needed
 * because the trap dispatcher stores a uniform
 * (vcpu, vmcr, rt) function pointer.
 */
static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}
870 | ||
2724c11a MZ |
/*
 * Emulate a read of ICC_HPPIR{0,1}_EL1: report the INTID of the
 * highest-priority pending interrupt of the trapped group, without
 * acknowledging it. A group mismatch or empty LRs yields the spurious
 * INTID (__vgic_v3_highest_priority_lr sets *lr_val to
 * ICC_IAR1_EL1_SPURIOUS on failure, so lr_val is valid at "spurious").
 */
static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}
890 | ||
6293d651 MZ |
891 | static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, |
892 | u32 vmcr, int rt) | |
893 | { | |
894 | vmcr &= ICH_VMCR_PMR_MASK; | |
895 | vmcr >>= ICH_VMCR_PMR_SHIFT; | |
896 | vcpu_set_reg(vcpu, rt, vmcr); | |
897 | } | |
898 | ||
/*
 * Emulate a write to ICC_PMR_EL1: fold the guest's Xrt value into the
 * VPMR field and program ICH_VMCR_EL2 directly.
 */
static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}
911 | ||
43515894 MZ |
/*
 * Emulate a read of ICC_RPR_EL1: return the running (highest active)
 * priority, derived from the AP registers.
 */
static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}
918 | ||
d840b2d3 MZ |
/*
 * Emulate a read of ICC_CTLR_EL1: assemble the value from the
 * hardware capability fields of ICH_VTR_EL2 (field positions per the
 * GICv3 architecture spec) and the EOImode/CBPR state held in the
 * shadow VMCR.
 */
static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}
940 | ||
/*
 * Emulate a write to ICC_CTLR_EL1: only the CBPR and EOImode bits are
 * writable; propagate them into ICH_VMCR_EL2. All other fields are
 * read-only hardware capabilities and are ignored.
 */
static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}
958 | ||
59da1cbf MZ |
/*
 * Handle a trapped guest access to a GICv3 CPU interface system
 * register, dispatching to the matching emulation handler.
 *
 * Returns 1 when the access was handled (or the AArch32 condition
 * check failed, which architecturally consumes the instruction), and
 * 0 when the access is not emulated here (wrong direction or unknown
 * register) so the caller can forward the trap to EL1 handling.
 */
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		/* Normalize the cp15 encoding to the AArch64 one */
		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		/* IAR is read-only */
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		/* EOIR is write-only */
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		/* HPPIR is read-only */
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		/* DIR is write-only */
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		/* RPR is read-only */
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}
1083 | ||
1084 | #endif |