/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }

        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        /*
         * TID3: trap feature register accesses that we virtualise.
         * For now this is conditional, since no AArch32 feature regs
         * are currently virtualised.
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;
}
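
/*
 * Illustrative sketch only, not called anywhere in the kernel: after
 * vcpu_reset_hcr(), HCR_EL2.RW mirrors the KVM_ARM_VCPU_EL1_32BIT
 * feature bit, so vcpu_el1_is_32bit() reports the execution state that
 * userspace requested:
 *
 *        vcpu_reset_hcr(vcpu);
 *        WARN_ON(vcpu_el1_is_32bit(vcpu) !=
 *                test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features));
 */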

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(elr);
        else
                return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, elr);
        else
                *__vcpu_elr_el1(vcpu) = v;
}
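
/*
 * vcpu_read_elr_el1()/vcpu_write_elr_el1() follow a pattern used
 * throughout this file: while the vcpu's system registers are loaded on
 * the CPU, the authoritative copy lives in hardware, so accesses must go
 * through read_sysreg_el1()/write_sysreg_el1() rather than the in-memory
 * context. A sketch of a round-trip that is correct in either state:
 *
 *        unsigned long elr = vcpu_read_elr_el1(vcpu);
 *        vcpu_write_elr_el1(vcpu, elr + 4);   // hypothetical adjustment
 */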

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        if (vcpu_mode_is_32bit(vcpu))
                kvm_skip_instr32(vcpu, is_wide_instr);
        else
                *vcpu_pc(vcpu) += 4;
}
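
/*
 * For an AArch64 guest, instructions are always 4 bytes, so the PC simply
 * advances by 4. An AArch32 guest may be executing 16-bit Thumb code, so
 * kvm_skip_instr32() needs the trapped instruction's width, which the
 * ESR_ELx.IL bit encodes. A typical (illustrative) call:
 *
 *        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 *
 * (kvm_vcpu_trap_il_is32bit() is defined later in this file.)
 */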

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
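
/*
 * Register number 31 encodes the zero register (XZR/WZR) in the syndrome
 * register fields: reads return 0 and writes are discarded. Illustrative
 * only:
 *
 *        vcpu_set_reg(vcpu, 31, 0xdead);        // silently dropped
 *        BUG_ON(vcpu_get_reg(vcpu, 31) != 0);   // always reads as zero
 */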

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return vcpu_read_spsr32(vcpu);

        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(spsr);
        else
                return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu_write_spsr32(vcpu, v);
                return;
        }

        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, spsr);
        else
                vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
                return mode > COMPAT_PSR_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}
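
/*
 * kvm_vcpu_get_condition(): when ESR_ELx.CV is set, the COND field holds
 * the condition code of the trapped (conditional AArch32) instruction;
 * e.g. a hypothetical ESR with CV set and COND == 0b1110 ("always")
 * yields 14. When CV is clear, the condition must be decoded from the
 * instruction itself, hence the -1 sentinel.
 */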

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
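
/*
 * HPFAR_EL2 reports bits [47:12] of the faulting IPA in bits [39:4], so
 * shifting the masked value left by 8 rebuilds the page-aligned IPA.
 * Worked example with a made-up fault: an abort at IPA 0x8001234 reports
 * HPFAR_EL2 = 0x80010, and 0x80010 << 8 == 0x8001000, the 4K page base.
 */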

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
                kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
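
/*
 * The SAS field is a 2-bit log2 of the access size, so the shift above
 * decodes it to a size in bytes: 0b00 -> 1, 0b01 -> 2, 0b10 -> 4,
 * 0b11 -> 8.
 */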

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
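
/*
 * kvm_vcpu_trap_get_fault() returns the full fault status code, including
 * the translation level at which the fault occurred, while
 * kvm_vcpu_trap_get_fault_type() masks off the level bits and keeps only
 * the fault class: e.g. translation faults at level 2 and level 3 both
 * report FSC_FAULT.
 */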

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);
        return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
}
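
/*
 * kvm_vcpu_sys_get_rt() extracts the transfer register of a trapped
 * MSR/MRS access. Illustrative flow for a hypothetical trapped
 * "mrs x3, sctlr_el1": Rt decodes to 3, the handler emulates the read,
 * and the result is written back with vcpu_set_reg(vcpu, 3, val).
 */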

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= (1 << 25);     /* SCTLR_EL1.EE: big-endian EL1 data accesses */
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

        return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;    /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;    /* Leave LE untouched */
}
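
/*
 * Worked example for the two conversion helpers above (illustrative): a
 * big-endian guest stores the 16-bit value 0x1234 to an emulated device.
 * vcpu_data_guest_to_host() applies be16_to_cpu(), which byte-swaps on a
 * little-endian host, handing 0x3412 to the MMIO emulation code; its
 * in-memory byte order then matches what the guest intended to store.
 * vcpu_data_host_to_guest() performs the inverse conversion on loads.
 */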

#endif /* __ARM64_KVM_EMULATE_H__ */