Commit | Line | Data |
---|---|---|
749cf76c CD |
1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License, version 2, as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write to the Free Software | |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
17 | */ | |
18 | ||
19 | #ifndef __ARM_KVM_EMULATE_H__ | |
20 | #define __ARM_KVM_EMULATE_H__ | |
21 | ||
22 | #include <linux/kvm_host.h> | |
23 | #include <asm/kvm_asm.h> | |
45e96ea6 | 24 | #include <asm/kvm_mmio.h> |
7393b599 | 25 | #include <asm/kvm_arm.h> |
4429fc64 | 26 | #include <asm/cputype.h> |
749cf76c | 27 | |
74a64a98 | 28 | /* arm64 compatibility macros */ |
256c0960 MR |
29 | #define PSR_AA32_MODE_ABT ABT_MODE |
30 | #define PSR_AA32_MODE_UND UND_MODE | |
31 | #define PSR_AA32_T_BIT PSR_T_BIT | |
32 | #define PSR_AA32_I_BIT PSR_I_BIT | |
33 | #define PSR_AA32_A_BIT PSR_A_BIT | |
34 | #define PSR_AA32_E_BIT PSR_E_BIT | |
35 | #define PSR_AA32_IT_MASK PSR_IT_MASK | |
74a64a98 | 36 | |
db730d8d | 37 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); |
74a64a98 MZ |
38 | |
/* arm64 compat alias: on 32-bit ARM there is only the AArch32 register file. */
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
	return vcpu_reg(vcpu, reg_num);
}
43 | ||
00536ec4 CD |
44 | unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu); |
45 | ||
/*
 * Read the banked SPSR for the vcpu's current mode.
 * NOTE(review): "vpcu" is a historical misspelling of "vcpu"; the name is
 * part of the interface and must be kept as-is for existing callers.
 */
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
	return *__vcpu_spsr(vcpu);
}
50 | ||
/* Write the banked SPSR for the vcpu's current mode. */
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	*__vcpu_spsr(vcpu) = v;
}
749cf76c | 55 | |
bc45a516 PF |
/* Read general-purpose register @reg_num of the vcpu. */
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return *vcpu_reg(vcpu, reg_num);
}
61 | ||
/* Write @val to general-purpose register @reg_num of the vcpu. */
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	*vcpu_reg(vcpu, reg_num) = val;
}
67 | ||
3aedd5c4 MZ |
68 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); |
69 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); | |
74a64a98 MZ |
70 | void kvm_inject_undef32(struct kvm_vcpu *vcpu); |
71 | void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr); | |
72 | void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr); | |
bfb78b5c | 73 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); |
74a64a98 MZ |
74 | |
/* arm64 compat wrapper: inject an Undefined Instruction exception. */
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	kvm_inject_undef32(vcpu);
}
79 | ||
/* arm64 compat wrapper: inject a Data Abort at @addr. */
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_dabt32(vcpu, addr);
}
84 | ||
/* arm64 compat wrapper: inject a Prefetch Abort at @addr. */
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	kvm_inject_pabt32(vcpu, addr);
}
5b3e5e5b | 89 | |
3aedd5c4 MZ |
/* arm64 compat wrapper: check the condition code of a trapped instruction. */
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	return kvm_condition_valid32(vcpu);
}
94 | ||
/* arm64 compat wrapper: advance the PC past the trapped instruction. */
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	kvm_skip_instr32(vcpu, is_wide_instr);
}
99 | ||
b856a591 CD |
/* Reset the Hyp Configuration Register to the default guest trap mask. */
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr = HCR_GUEST_MASK;
}
104 | ||
/*
 * Return a writable pointer to the vcpu's shadow HCR.
 * The cast deliberately drops the const qualifier from @vcpu so callers
 * holding a const vcpu can still update the trap configuration.
 */
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr;
}
109 | ||
de737089 MZ |
/* Stop trapping guest WFE instructions (clear HCR.TWE). */
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr &= ~HCR_TWE;
}
114 | ||
/* Trap guest WFE instructions to the hypervisor (set HCR.TWE). */
static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr |= HCR_TWE;
}
119 | ||
/* On 32-bit ARM every guest is 32-bit, so this is trivially true. */
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return true;
}
124 | ||
/* Return a pointer to the guest's program counter. */
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
129 | ||
/*
 * Return a writable pointer to the guest's CPSR; the cast drops the
 * const qualifier inherited from @vcpu.
 */
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
134 | ||
aa024c2f MZ |
/* Put the guest in Thumb state (set CPSR.T). */
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}
139 | ||
749cf76c CD |
140 | static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) |
141 | { | |
c2a8dab5 | 142 | unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; |
749cf76c CD |
143 | return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); |
144 | } | |
145 | ||
146 | static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) | |
147 | { | |
c2a8dab5 | 148 | unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; |
b276f1b3 | 149 | return cpsr_mode > USR_MODE; |
749cf76c CD |
150 | } |
151 | ||
/* Return the Hyp Syndrome Register value captured at the last trap. */
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hsr;
}
156 | ||
3aedd5c4 MZ |
157 | static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) |
158 | { | |
159 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | |
160 | ||
161 | if (hsr & HSR_CV) | |
162 | return (hsr & HSR_COND) >> HSR_COND_SHIFT; | |
163 | ||
164 | return -1; | |
165 | } | |
166 | ||
7393b599 MZ |
/* Return the faulting virtual address (HDFAR/HIFAR) from the last abort. */
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.hxfar;
}
171 | ||
/*
 * Return the faulting intermediate physical address. HPFAR holds the IPA
 * page frame; the shift converts it to a byte address.
 */
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
176 | ||
4a1df28a MZ |
/* True when the data abort syndrome (ISS) in the HSR is valid (HSR.ISV). */
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
181 | ||
023cc964 MZ |
/* True when the data abort was caused by a write access (HSR.WnR). */
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}
186 | ||
7c511b88 MZ |
/* True when the aborted load requires sign extension (HSR.SSE). */
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
191 | ||
d0adf747 MZ |
/* Return the register number transferred by the aborted access (HSR.SRT). */
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
196 | ||
b37670b0 MZ |
/* True when the abort happened on a stage-1 page table walk (HSR.S1PTW). */
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
201 | ||
57c841f1 MZ |
/* True when the abort came from a cache maintenance operation (HSR.CM). */
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}
206 | ||
a7123377 MZ |
207 | /* Get Access Size from a data abort */ |
208 | static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) | |
209 | { | |
210 | switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) { | |
211 | case 0: | |
212 | return 1; | |
213 | case 1: | |
214 | return 2; | |
215 | case 2: | |
216 | return 4; | |
217 | default: | |
218 | kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); | |
219 | return -EFAULT; | |
220 | } | |
221 | } | |
222 | ||
23b415d6 MZ |
/* This one is not specific to Data Abort */
/* True when the trapped instruction was 32 bits wide (HSR.IL). */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}
228 | ||
4926d445 MZ |
/* Return the exception class (HSR.EC) of the last trap. */
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}
233 | ||
52d1dba9 MZ |
/* True when the last trap was an instruction abort. */
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}
238 | ||
/* Return the full fault status code (HSR.FSC) of the last abort. */
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}
243 | ||
/* Return the fault status code with the level bits masked off. */
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}
248 | ||
bb428921 JM |
/*
 * True when the data abort was a synchronous external abort or a parity/ECC
 * error, either on the access itself or on any level of the translation
 * table walk.
 */
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}
267 | ||
64cf98fa CD |
268 | static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) |
269 | { | |
270 | if (kvm_vcpu_trap_is_iabt(vcpu)) | |
271 | return false; | |
272 | ||
273 | return kvm_vcpu_dabt_iswrite(vcpu); | |
274 | } | |
275 | ||
c088f8f0 CD |
/* Return the immediate operand encoded in the trapped HVC instruction. */
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
280 | ||
/* Return the affinity bits of the guest's virtual MPIDR (CP15 c0). */
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
285 | ||
ce94fe93 MZ |
/* Switch the guest to big-endian data accesses (set CPSR.E). */
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}
290 | ||
6d89d2d9 MZ |
/* True when the guest is currently doing big-endian data accesses. */
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}
295 | ||
296 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, | |
297 | unsigned long data, | |
298 | unsigned int len) | |
299 | { | |
300 | if (kvm_vcpu_is_be(vcpu)) { | |
301 | switch (len) { | |
302 | case 1: | |
303 | return data & 0xff; | |
304 | case 2: | |
305 | return be16_to_cpu(data & 0xffff); | |
306 | default: | |
307 | return be32_to_cpu(data); | |
308 | } | |
27f194fd VK |
309 | } else { |
310 | switch (len) { | |
311 | case 1: | |
312 | return data & 0xff; | |
313 | case 2: | |
314 | return le16_to_cpu(data & 0xffff); | |
315 | default: | |
316 | return le32_to_cpu(data); | |
317 | } | |
6d89d2d9 | 318 | } |
6d89d2d9 MZ |
319 | } |
320 | ||
321 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, | |
322 | unsigned long data, | |
323 | unsigned int len) | |
324 | { | |
325 | if (kvm_vcpu_is_be(vcpu)) { | |
326 | switch (len) { | |
327 | case 1: | |
328 | return data & 0xff; | |
329 | case 2: | |
330 | return cpu_to_be16(data & 0xffff); | |
331 | default: | |
332 | return cpu_to_be32(data); | |
333 | } | |
27f194fd VK |
334 | } else { |
335 | switch (len) { | |
336 | case 1: | |
337 | return data & 0xff; | |
338 | case 2: | |
339 | return cpu_to_le16(data & 0xffff); | |
340 | default: | |
341 | return cpu_to_le32(data); | |
342 | } | |
6d89d2d9 | 343 | } |
6d89d2d9 MZ |
344 | } |
345 | ||
384b40ca MR |
/* Pointer authentication does not exist on 32-bit ARM: arm64 compat no-op. */
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
347 | ||
749cf76c | 348 | #endif /* __ARM_KVM_EMULATE_H__ */ |