/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define vtr_to_max_lr_idx(v)            ((v) & 0xf)
#define vtr_to_nr_pre_bits(v)           ((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)           (1 << (vtr_to_nr_pre_bits(v) - 5))

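/*
 * A worked example of the extractors above, assuming a hypothetical
 * ICH_VTR_EL2 value with ListRegs = 3 and PREbits = 4:
 *
 *   vtr_to_max_lr_idx(v)  = 3                (LRs 0..3 implemented)
 *   vtr_to_nr_pre_bits(v) = 4 + 1 = 5        (5 preemption bits)
 *   vtr_to_nr_apr_regs(v) = 1 << (5 - 5) = 1 (one AP0Rn/AP1Rn register)
 *
 * System register names are encoded in the MRS/MSR instruction itself,
 * so the banked ICH_LR<n>_EL2 registers cannot be indexed dynamically;
 * the accessors below spell out all sixteen cases instead.
 */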
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
        switch (lr & 0xf) {
        case 0:
                return read_gicreg(ICH_LR0_EL2);
        case 1:
                return read_gicreg(ICH_LR1_EL2);
        case 2:
                return read_gicreg(ICH_LR2_EL2);
        case 3:
                return read_gicreg(ICH_LR3_EL2);
        case 4:
                return read_gicreg(ICH_LR4_EL2);
        case 5:
                return read_gicreg(ICH_LR5_EL2);
        case 6:
                return read_gicreg(ICH_LR6_EL2);
        case 7:
                return read_gicreg(ICH_LR7_EL2);
        case 8:
                return read_gicreg(ICH_LR8_EL2);
        case 9:
                return read_gicreg(ICH_LR9_EL2);
        case 10:
                return read_gicreg(ICH_LR10_EL2);
        case 11:
                return read_gicreg(ICH_LR11_EL2);
        case 12:
                return read_gicreg(ICH_LR12_EL2);
        case 13:
                return read_gicreg(ICH_LR13_EL2);
        case 14:
                return read_gicreg(ICH_LR14_EL2);
        case 15:
                return read_gicreg(ICH_LR15_EL2);
        }

        unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
        switch (lr & 0xf) {
        case 0:
                write_gicreg(val, ICH_LR0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_LR1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_LR2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_LR3_EL2);
                break;
        case 4:
                write_gicreg(val, ICH_LR4_EL2);
                break;
        case 5:
                write_gicreg(val, ICH_LR5_EL2);
                break;
        case 6:
                write_gicreg(val, ICH_LR6_EL2);
                break;
        case 7:
                write_gicreg(val, ICH_LR7_EL2);
                break;
        case 8:
                write_gicreg(val, ICH_LR8_EL2);
                break;
        case 9:
                write_gicreg(val, ICH_LR9_EL2);
                break;
        case 10:
                write_gicreg(val, ICH_LR10_EL2);
                break;
        case 11:
                write_gicreg(val, ICH_LR11_EL2);
                break;
        case 12:
                write_gicreg(val, ICH_LR12_EL2);
                break;
        case 13:
                write_gicreg(val, ICH_LR13_EL2);
                break;
        case 14:
                write_gicreg(val, ICH_LR14_EL2);
                break;
        case 15:
                write_gicreg(val, ICH_LR15_EL2);
                break;
        }
}

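/*
 * The same spelled-out access pattern is needed for the four banked
 * ICH_AP0R<n>_EL2 and ICH_AP1R<n>_EL2 active priority registers.
 */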
static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP0R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP0R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP0R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP0R3_EL2);
                break;
        }
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP1R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP1R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP1R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP1R3_EL2);
                break;
        }
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP0R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP0R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP0R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP0R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP1R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP1R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP1R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP1R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

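/*
 * Save the guest's view of the GIC CPU interface. When the guest uses
 * the memory-mapped (GICv2-on-GICv3) interface, the VMCR is snapshotted
 * behind a dsb(st); when list registers are in use, the LRs are saved
 * and zeroed and the implemented active priority registers are read
 * back. The nr_pre_bits switches below deliberately fall through so
 * that only the AP registers the hardware implements are accessed.
 */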
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        u64 val;

        /*
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface.
         */
        if (!cpu_if->vgic_sre) {
                dsb(st);
                cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
        }

        if (used_lrs) {
                int i;
                u32 nr_pre_bits;

                cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

                write_gicreg(0, ICH_HCR_EL2);
                val = read_gicreg(ICH_VTR_EL2);
                nr_pre_bits = vtr_to_nr_pre_bits(val);

                for (i = 0; i < used_lrs; i++) {
                        if (cpu_if->vgic_elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
                        else
                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

                        __gic_v3_set_lr(0, i);
                }

                switch (nr_pre_bits) {
                case 7:
                        cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
                        cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
                case 6:
                        cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
                default:
                        cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
                }

                switch (nr_pre_bits) {
                case 7:
                        cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
                        cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
                case 6:
                        cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
                default:
                        cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
                }
        } else {
                if (static_branch_unlikely(&vgic_v3_cpuif_trap))
                        write_gicreg(0, ICH_HCR_EL2);

                cpu_if->vgic_elrsr = 0xffff;
                cpu_if->vgic_ap0r[0] = 0;
                cpu_if->vgic_ap0r[1] = 0;
                cpu_if->vgic_ap0r[2] = 0;
                cpu_if->vgic_ap0r[3] = 0;
                cpu_if->vgic_ap1r[0] = 0;
                cpu_if->vgic_ap1r[1] = 0;
                cpu_if->vgic_ap1r[2] = 0;
                cpu_if->vgic_ap1r[3] = 0;
        }

        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

        if (!cpu_if->vgic_sre) {
                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
                isb();
                write_gicreg(1, ICC_SRE_EL1);
        }
}

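/*
 * Restore mirrors the save path in reverse: ICC_SRE_EL1 must be
 * programmed (and synchronized with an isb) before VMCR_EL2, as the
 * comment below explains, and the SRE enable bit at EL2 is only
 * dropped at the very end, once all the LR and AP state is in place.
 */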
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        u64 val;
        u32 nr_pre_bits;
        int i;

        /*
         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
         * Group0 interrupt (as generated in GICv2 mode) to be
         * delivered as a FIQ to the guest, with potentially fatal
         * consequences. So we must make sure that ICC_SRE_EL1 has
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC, and VMCR_EL2 in
         * particular.
         */
        if (!cpu_if->vgic_sre) {
                write_gicreg(0, ICC_SRE_EL1);
                isb();
                write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
        }

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        if (used_lrs) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

                switch (nr_pre_bits) {
                case 7:
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
                case 6:
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
                default:
                        __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
                }

                switch (nr_pre_bits) {
                case 7:
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
                case 6:
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
                default:
                        __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
                }

                for (i = 0; i < used_lrs; i++)
                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
        } else {
                /*
                 * If we need to trap system registers, we must write
                 * ICH_HCR_EL2 anyway, even if no interrupts are being
                 * injected.
                 */
                if (static_branch_unlikely(&vgic_v3_cpuif_trap))
                        write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
        }

        /*
         * Ensures that the above will have reached the
         * (re)distributors. This ensures the guest will read the
         * correct values from the memory-mapped interface.
         */
        if (!cpu_if->vgic_sre) {
                isb();
                dsb(sy);
        }

        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
                     ICC_SRE_EL2);
}

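/* Reset every implemented list register to an empty state. */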
void __hyp_text __vgic_v3_init_lrs(void)
{
        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
        int i;

        for (i = 0; i <= max_lr_idx; i++)
                __gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
        return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
        return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
        write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

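/*
 * Everything from here on is arm64-only: emulation of the guest's
 * ICC_* system register accesses when they are trapped to EL2 (see
 * the vgic_v3_cpuif_trap static key used above).
 */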
static int __hyp_text __vgic_v3_bpr_min(void)
{
        /* See Pseudocode for VPriorityGroup */
        return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

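/*
 * With the 5 to 7 preemption bits assumed by vtr_to_nr_apr_regs(),
 * __vgic_v3_bpr_min() evaluates to a minimum BPR between 3 and 1.
 */
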
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

        return crm != 8;
}

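/*
 * The CRm test above relies on the encodings of the registers this
 * helper is used for: the group-0 ICC_IAR0/EOIR0/HPPIR0/BPR0/AP0Rn
 * accessors all live in CRm == 8, while their group-1 counterparts
 * use other CRm values (12, or 9 for ICC_AP1Rn_EL1).
 */
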
#define GICv3_IDLE_PRIORITY     0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
                                                    u32 vmcr,
                                                    u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        u8 priority = GICv3_IDLE_PRIORITY;
        int i, lr = -1;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);
                u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

                /* Not pending in the state? */
                if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
                        continue;

                /* Group-0 interrupt, but Group-0 disabled? */
                if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
                        continue;

                /* Group-1 interrupt, but Group-1 disabled? */
                if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
                        continue;

                /* Not the highest priority? */
                if (lr_prio >= priority)
                        continue;

                /* This is a candidate */
                priority = lr_prio;
                *lr_val = val;
                lr = i;
        }

        if (lr == -1)
                *lr_val = ICC_IAR1_EL1_SPURIOUS;

        return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
                                               int intid, u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
        int i;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);

                if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
                    (val & ICH_LR_ACTIVE_BIT)) {
                        *lr_val = val;
                        return i;
                }
        }

        *lr_val = ICC_IAR1_EL1_SPURIOUS;
        return -1;
}

static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 val;

                /*
                 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
                 * contain the active priority levels for this VCPU
                 * for the maximum number of supported priority
                 * levels, and we return the full priority level only
                 * if the BPR is programmed to its minimum, otherwise
                 * we return a combination of the priority level and
                 * subpriority, as determined by the setting of the
                 * BPR, but without the full subpriority.
                 */
                val = __vgic_v3_read_ap0rn(i);
                val |= __vgic_v3_read_ap1rn(i);
                if (!val) {
                        hap += 32;
                        continue;
                }

                return (hap + __ffs(val)) << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
        return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
        unsigned int bpr;

        if (vmcr & ICH_VMCR_CBPR_MASK) {
                bpr = __vgic_v3_get_bpr0(vmcr);
                if (bpr < 7)
                        bpr++;
        } else {
                bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        }

        return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
        unsigned int bpr;

        if (!grp)
                bpr = __vgic_v3_get_bpr0(vmcr) + 1;
        else
                bpr = __vgic_v3_get_bpr1(vmcr);

        return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
        u8 pre, ap;
        u32 val;
        int apr;

        pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
        ap = pre >> __vgic_v3_bpr_min();
        apr = ap / 32;

        if (!grp) {
                val = __vgic_v3_read_ap0rn(apr);
                __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
        } else {
                val = __vgic_v3_read_ap1rn(apr);
                __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
        }
}

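/*
 * Round-trip example, assuming 5 preemption bits (bpr_min == 3) and a
 * group-1 BPR programmed to that minimum: activating priority 0x28
 * computes pre = 0x28 & (0xff << 3) = 0x28 and ap = 0x28 >> 3 = 5,
 * setting bit 5 of ICH_AP1R0_EL2; __vgic_v3_get_highest_active_priority()
 * then finds __ffs == 5 and reports 5 << 3 = 0x28 again.
 */
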
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 ap0, ap1;
                int c0, c1;

                ap0 = __vgic_v3_read_ap0rn(i);
                ap1 = __vgic_v3_read_ap1rn(i);
                if (!ap0 && !ap1) {
                        hap += 32;
                        continue;
                }

                c0 = ap0 ? __ffs(ap0) : 32;
                c1 = ap1 ? __ffs(ap1) : 32;

                /* Always clear the LSB, which is the highest priority */
                if (c0 < c1) {
                        ap0 &= ~BIT(c0);
                        __vgic_v3_write_ap0rn(ap0, i);
                        hap += c0;
                } else {
                        ap1 &= ~BIT(c1);
                        __vgic_v3_write_ap1rn(ap1, i);
                        hap += c1;
                }

                /* Rescale to 8 bits of priority */
                return hap << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

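/*
 * Emulate a read of ICC_IAR1_EL1: pick the highest priority pending
 * LR, check it against the group enables, the PMR and the current
 * preemption level, then mark it active (LPIs keep no active state),
 * record the active priority and return the INTID, or a spurious
 * INTID if any of the checks fails.
 */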
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        u8 lr_prio, pmr;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr < 0)
                goto spurious;

        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;

        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;

        if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
                goto spurious;

        lr_val &= ~ICH_LR_STATE;
        /* No active state for LPIs */
        if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
                lr_val |= ICH_LR_ACTIVE_BIT;
        __gic_v3_set_lr(lr_val, lr);
        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
        return;

spurious:
        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
        lr_val &= ~ICH_LR_ACTIVE_BIT;
        if (lr_val & ICH_LR_HW) {
                u32 pid;

                pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
                gic_write_dir(pid);
        }

        __gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
        u32 hcr;

        hcr = read_gicreg(ICH_HCR_EL2);
        hcr += 1 << ICH_HCR_EOIcount_SHIFT;
        write_gicreg(hcr, ICH_HCR_EL2);
}

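/*
 * Emulate a write to ICC_EOIR1_EL1: the priority drop always takes
 * place, while the deactivation only happens for non-LPI interrupts
 * when EOImode is 0; if no matching active LR is found, EOIcount is
 * bumped instead so the unpaired EOI is accounted for.
 */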
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        u8 lr_prio, act_prio;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        /* Drop priority in any case */
        act_prio = __vgic_v3_clear_highest_active_priority();

        /* If EOIing an LPI, no deactivate to be performed */
        if (vid >= VGIC_MIN_LPI)
                return;

        /* EOImode == 1, nothing to be done here */
        if (vmcr & ICH_VMCR_EOIM_MASK)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

        /* If priorities or group do not match, the guest has fscked-up. */
        if (grp != !!(lr_val & ICH_LR_GROUP) ||
            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
                return;

        /* Let's now perform the deactivation */
        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG0_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG0_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG1_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG1_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min() - 1;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR0_SHIFT;
        val &= ICH_VMCR_BPR0_MASK;
        vmcr &= ~ICH_VMCR_BPR0_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min();

        if (vmcr & ICH_VMCR_CBPR_MASK)
                return;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR1_SHIFT;
        val &= ICH_VMCR_BPR1_MASK;
        vmcr &= ~ICH_VMCR_BPR1_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val;

        if (!__vgic_v3_get_group(vcpu))
                val = __vgic_v3_read_ap0rn(n);
        else
                val = __vgic_v3_read_ap1rn(n);

        vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (!__vgic_v3_get_group(vcpu))
                __vgic_v3_write_ap0rn(val, n);
        else
                __vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
                                             u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        u64 lr_val;
        int lr, lr_grp, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr == -1)
                goto spurious;

        lr_grp = !!(lr_val & ICH_LR_GROUP);
        if (lr_grp != grp)
                lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

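/*
 * Entry point for traps on guest GIC CPU interface accesses: decode
 * the faulting AArch64 (or AArch32 cp15) access into a sysreg
 * encoding, dispatch to one of the handlers above with the current
 * VMCR, and return 1 if the access was handled (0 if it is not a
 * recognized GIC access, letting the caller deal with it).
 */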
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        int rt;
        u32 esr;
        u32 vmcr;
        void (*fn)(struct kvm_vcpu *, u32, int);
        bool is_read;
        u32 sysreg;

        esr = kvm_vcpu_get_hsr(vcpu);
        if (vcpu_mode_is_32bit(vcpu)) {
                if (!kvm_condition_valid(vcpu))
                        return 1;

                sysreg = esr_cp15_to_sysreg(esr);
        } else {
                sysreg = esr_sys64_to_sysreg(esr);
        }

        is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

        switch (sysreg) {
        case SYS_ICC_IAR1_EL1:
                fn = __vgic_v3_read_iar;
                break;
        case SYS_ICC_EOIR1_EL1:
                fn = __vgic_v3_write_eoir;
                break;
        case SYS_ICC_GRPEN1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen1;
                else
                        fn = __vgic_v3_write_igrpen1;
                break;
        case SYS_ICC_BPR1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr1;
                else
                        fn = __vgic_v3_write_bpr1;
                break;
        case SYS_ICC_AP1Rn_EL1(0):
                if (is_read)
                        fn = __vgic_v3_read_apxr0;
                else
                        fn = __vgic_v3_write_apxr0;
                break;
        case SYS_ICC_AP1Rn_EL1(1):
                if (is_read)
                        fn = __vgic_v3_read_apxr1;
                else
                        fn = __vgic_v3_write_apxr1;
                break;
        case SYS_ICC_AP1Rn_EL1(2):
                if (is_read)
                        fn = __vgic_v3_read_apxr2;
                else
                        fn = __vgic_v3_write_apxr2;
                break;
        case SYS_ICC_AP1Rn_EL1(3):
                if (is_read)
                        fn = __vgic_v3_read_apxr3;
                else
                        fn = __vgic_v3_write_apxr3;
                break;
        case SYS_ICC_HPPIR1_EL1:
                fn = __vgic_v3_read_hppir;
                break;
        case SYS_ICC_GRPEN0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen0;
                else
                        fn = __vgic_v3_write_igrpen0;
                break;
        case SYS_ICC_BPR0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr0;
                else
                        fn = __vgic_v3_write_bpr0;
                break;
        default:
                return 0;
        }

        vmcr = __vgic_v3_read_vmcr();
        rt = kvm_vcpu_sys_get_rt(vcpu);
        fn(vcpu, vmcr, rt);

        return 1;
}

#endif