/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

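/*
 * Build the baseline HCR_EL2 value for this vcpu: start from the default
 * guest trap configuration and adjust it for VHE hosts, the RAS and FWB
 * CPU capabilities, and 32-bit guests (see the individual cases below).
 */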
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

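/*
 * While the vcpu's EL1 system registers are loaded on the CPU
 * (sysregs_loaded_on_cpu is set), ELR_EL1 must be accessed through the
 * hardware register; otherwise the in-memory copy is used.
 */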
static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

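/*
 * SPSR accessors: a 32-bit guest uses the banked AArch32 SPSR, while a
 * 64-bit guest uses either the hardware SPSR_EL1 (when the sysregs are
 * loaded on the CPU) or the in-memory copy.
 */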
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

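/*
 * The accessors below decode vcpu->arch.fault: the exception syndrome and
 * fault addresses (ESR_EL2, FAR_EL2, HPFAR_EL2) captured on the last exit
 * from the guest.
 */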
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

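/*
 * HPFAR_EL2 reports the faulting IPA[47:12] in its FIPA field starting at
 * bit 4; shifting the masked value left by 8 reconstructs the page-aligned
 * intermediate physical address.
 */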
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

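/*
 * Data Abort ISS decoding: syndrome validity, sign extension, transfer
 * register, stage 1 page-table walk, write vs read, cache maintenance and
 * access size.
 */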
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

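/*
 * Guest data endianness: for a 32-bit guest it is given by PSTATE.E
 * (PSR_AA32_E_BIT); for a 64-bit guest it is SCTLR_EL1.EE (bit 25).
 */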
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

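/*
 * MMIO value massaging: convert data between the guest's endianness and
 * the host's native representation, truncated to the access length.
 */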
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

#endif /* __ARM64_KVM_EMULATE_H__ */