/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/linkage.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

	.arch_extension virt

	.text
	.pushsection	.hyp.text, "ax"

.macro load_vcpu	reg
	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
.endm

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued, since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is issued from SVC mode (i.e. a guest or the
 * host kernel), and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are issued from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from the host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */
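
/*
 * For illustration only: per the ABI above, a host-side HYP call boils down
 * to something like the sequence below (a sketch; "__some_hyp_fn" is a
 * made-up name, and the host normally goes through the kvm_call_hyp helper
 * rather than open-coding the HVC):
 *
 *	ldr	r0, =__some_hyp_fn	@ pointer to a Hyp-mode function
 *	mov	r1, #0			@ up to three arguments,
 *	mov	r2, #0			@ passed to the function
 *	mov	r3, #0			@ in r0, r1 and r2
 *	hvc	#0			@ traps to hyp_hvc below
 *	@ execution resumes here in SVC mode once the HYP function returns
 */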

	.align 5
__kvm_hyp_vector:
	.global __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	.align 5
__kvm_hyp_vector_bp_inv:
	.global __kvm_hyp_vector_bp_inv

	/*
	 * We encode the exception entry in the bottom 3 bits of
	 * SP, which we have to guarantee is 8-byte aligned on entry.
	 */
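	/*
	 * Each vector slot below simply bumps SP by one and falls through
	 * into the next slot, so by the time we reach the common code the
	 * number of increments taken (7 for Reset down to 0 for FIQ)
	 * identifies the exception that got us here.
	 */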
	W(add)	sp, sp, #1	/* Reset          7 */
	W(add)	sp, sp, #1	/* Undef          6 */
	W(add)	sp, sp, #1	/* Syscall        5 */
	W(add)	sp, sp, #1	/* Prefetch abort 4 */
	W(add)	sp, sp, #1	/* Data abort     3 */
	W(add)	sp, sp, #1	/* HVC            2 */
	W(add)	sp, sp, #1	/* IRQ            1 */
	W(nop)			/* FIQ            0 */

	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
	isb

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * Yet another silly hack: Use VPIDR as a temp register.
	 * Thumb2 is really a pain, as SP cannot be used with most
	 * of the bitwise instructions. The vect_br macro ensures
	 * things get cleaned up.
	 */
	mcr	p15, 4, r0, c0, c0, 0	/* stash r0 in VPIDR */
	mov	r0, sp
	and	r0, r0, #7		/* extract the vector number */
	sub	sp, sp, r0		/* restore the original SP */
	push	{r1, r2}
	mov	r1, r0			/* vector number, tested by vect_br */
	mrc	p15, 4, r0, c0, c0, 0	/* restore r0 from VPIDR */
	mrc	p15, 0, r2, c0, c0, 0	/* MIDR */
	mcr	p15, 4, r2, c0, c0, 0	/* reset VPIDR back to MIDR */
#endif

/*
 * Dispatch to the handler for vector number \val: the ARM version XORs
 * \val into the low bits of SP (clearing them, and thus restoring SP, on
 * a match), the Thumb2 version compares against the vector number saved
 * in r1 above.
 */
.macro vect_br val, targ
ARM(	eor	sp, sp, #\val	)
ARM(	tst	sp, #7		)
ARM(	eorne	sp, sp, #\val	)

THUMB(	cmp	r1, #\val	)
THUMB(	popeq	{r1, r2}	)

	beq	\targ
.endm

	vect_br	0, hyp_fiq
	vect_br	1, hyp_irq
	vect_br	2, hyp_hvc
	vect_br	3, hyp_dabt
	vect_br	4, hyp_pabt
	vect_br	5, hyp_svc
	vect_br	6, hyp_undef
	vect_br	7, hyp_reset
#endif

.macro invalid_vector label, cause
	.align
\label:	mov	r0, #\cause
	b	__hyp_panic
.endm

	invalid_vector	hyp_reset	ARM_EXCEPTION_RESET
	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ

ENTRY(__hyp_do_panic)
	mrs	lr, cpsr
	bic	lr, lr, #MODE_MASK
	orr	lr, lr, #SVC_MODE		@ build an SVC-mode SPSR
THUMB(	orr	lr, lr, #PSR_T_BIT	)
	msr	spsr_cxsf, lr
	ldr	lr, =panic
	msr	ELR_hyp, lr			@ exception-return into panic()
	ldr	lr, =kvm_call_hyp
	clrex
	eret
ENDPROC(__hyp_do_panic)

hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest,
	 * or from executing HVC from the host kernel, which means
	 * "do something in Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc	p15, 6, r0, r2, c2	@ VTTBR
	lsr	r2, r2, #16
	and	r2, r2, #0xff		@ VMID is VTTBR[55:48]
	cmp	r2, #0
	bne	guest_trap		@ Guest called HVC

	/*
	 * Getting here means host called HVC, we shift parameters and branch
	 * to Hyp function.
	 */
	pop	{r0, r1, r2}

	/*
	 * Check if we have a kernel function pointer, which is guaranteed
	 * to be bigger than the maximum hyp stub hypercall number
	 */
	cmp	r0, #HVC_STUB_HCALL_NR
	bhs	1f

	/*
	 * Not a kernel function, treat it as a stub hypercall.
	 * Compute the physical address for __kvm_handle_stub_hvc
	 * (as the code lives in the idmapped page) and branch there.
	 * We hijack ip (r12) as a tmp register.
	 */
	push	{r1}
	ldr	r1, =kimage_voffset
	ldr	r1, [r1]
	ldr	ip, =__kvm_handle_stub_hvc
	sub	ip, ip, r1		@ PA = VA - kimage_voffset
	pop	{r1}

	bx	ip
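
	/*
	 * For reference (a sketch, not taken from this file): a stub
	 * hypercall from the host is simply an HVC #0 with the stub
	 * function number in r0, e.g.
	 *
	 *	mov	r0, #HVC_RESET_VECTORS
	 *	hvc	#0
	 *
	 * which lands in __kvm_handle_stub_hvc via the branch above.
	 */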

1:
	/*
	 * Pushing r2 here is just a way of keeping the stack aligned to
	 * 8 bytes on any path that can trigger a HYP exception. Here,
	 * we may well be about to jump into the guest, and the guest
	 * exit would otherwise be badly decoded by our fancy
	 * "decode-exception-without-a-branch" code...
	 */
	push	{r2, lr}

	mov	lr, r0			@ r0 holds the HYP function pointer
	mov	r0, r1			@ shuffle the arguments down
	mov	r1, r2			@ so the function sees them
	mov	r2, r3			@ in r0, r1 and r2

THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{r2, lr}
	eret

guest_trap:
	load_vcpu r0			@ Load VCPU pointer to r0

#ifdef CONFIG_VFPv3
	@ Check for a VFP access
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_CP_0_13
	beq	__vfp_guest_restore
#endif

	mov	r1, #ARM_EXCEPTION_HVC
	b	__guest_exit

hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu r0			@ Load VCPU pointer to r0
	b	__guest_exit

hyp_dabt:
	/*
	 * A data abort taken in HYP mode is only tolerated if ELR_hyp
	 * points at the abort_guest_exit_start/abort_guest_exit_end
	 * markers in the guest-exit path; anything else is a HYP bug
	 * and we panic. Otherwise, flag the abort in the exit code
	 * held in r0 and return to the world-switch code.
	 */
	push	{r0, r1}
	mrs	r0, ELR_hyp
	ldr	r1, =abort_guest_exit_start
THUMB(	add	r1, r1, #1)
	cmp	r0, r1
	ldrne	r1, =abort_guest_exit_end
THUMB(	addne	r1, r1, #1)
	cmpne	r0, r1
	pop	{r0, r1}
	bne	__hyp_panic

	orr	r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
	eret

	.ltorg

	.popsection