Commit | Line | Data |
---|---|---|
29eb61bc AG |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | |
3 | * it under the terms of the GNU General Public License, version 2, as | |
4 | * published by the Free Software Foundation. | |
5 | * | |
6 | * This program is distributed in the hope that it will be useful, | |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
9 | * GNU General Public License for more details. | |
10 | * | |
11 | * You should have received a copy of the GNU General Public License | |
12 | * along with this program; if not, write to the Free Software | |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
14 | * | |
15 | * Copyright SUSE Linux Products GmbH 2009 | |
16 | * | |
17 | * Authors: Alexander Graf <agraf@suse.de> | |
18 | */ | |
19 | ||
20 | #include <asm/ppc_asm.h> | |
21 | #include <asm/kvm_asm.h> | |
22 | #include <asm/reg.h> | |
23 | #include <asm/page.h> | |
24 | #include <asm/asm-offsets.h> | |
25 | #include <asm/exception-64s.h> | |
26 | ||
27 | #define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit /* dot-symbol = ppc64 function entry of the C exit handler */ | |
28 | #define ULONG_SIZE 8 /* sizeof(unsigned long) on ppc64 */ | |
29 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) /* byte offset of guest GPR n inside the vcpu struct */ | |
30 | ||
29eb61bc AG |
31 | .macro DISABLE_INTERRUPTS /* clear MSR_EE, leaving all other MSR bits untouched */ |
32 | mfmsr r0 /* r0 = current MSR */ | |
33 | rldicl r0,r0,48,1 /* rotate left 48 so MSR_EE (bit 48) lands in bit 0, mask clears it */ | |
34 | rotldi r0,r0,16 /* rotate 16 more (48+16 = 64) to restore original bit positions */ | |
35 | mtmsrd r0,1 /* L=1 form: updates only EE/RI, cheaper than a full MSR write */ | |
36 | .endm | |
37 | ||
97c4cfbe AG |
38 | #define VCPU_LOAD_NVGPRS(vcpu) /* load the guest's non-volatile GPRs r14-r31 from the vcpu struct */ \ |
39 | ld r14, VCPU_GPR(r14)(vcpu); \ | |
40 | ld r15, VCPU_GPR(r15)(vcpu); \ | |
41 | ld r16, VCPU_GPR(r16)(vcpu); \ | |
42 | ld r17, VCPU_GPR(r17)(vcpu); \ | |
43 | ld r18, VCPU_GPR(r18)(vcpu); \ | |
44 | ld r19, VCPU_GPR(r19)(vcpu); \ | |
45 | ld r20, VCPU_GPR(r20)(vcpu); \ | |
46 | ld r21, VCPU_GPR(r21)(vcpu); \ | |
47 | ld r22, VCPU_GPR(r22)(vcpu); \ | |
48 | ld r23, VCPU_GPR(r23)(vcpu); \ | |
49 | ld r24, VCPU_GPR(r24)(vcpu); \ | |
50 | ld r25, VCPU_GPR(r25)(vcpu); \ | |
51 | ld r26, VCPU_GPR(r26)(vcpu); \ | |
52 | ld r27, VCPU_GPR(r27)(vcpu); \ | |
53 | ld r28, VCPU_GPR(r28)(vcpu); \ | |
54 | ld r29, VCPU_GPR(r29)(vcpu); \ | |
55 | ld r30, VCPU_GPR(r30)(vcpu); \ | |
56 | ld r31, VCPU_GPR(r31)(vcpu); \ | |
57 | ||
29eb61bc AG |
58 | /***************************************************************************** |
59 | * * | |
60 | * Guest entry / exit code that is in kernel module memory (highmem) * | |
61 | * * | |
62 | ****************************************************************************/ | |
63 | ||
64 | /* Registers: | |
65 | * r3: kvm_run pointer | |
66 | * r4: vcpu pointer | |
67 | */ | |
68 | _GLOBAL(__kvmppc_vcpu_entry) | |
69 | ||
70 | kvm_start_entry: | |
71 | /* Write correct stack frame */ | |
72 | mflr r0 /* r0 = our return address */ | |
73 | std r0,16(r1) /* save LR in the caller's LR save slot */ | |
74 | ||
75 | /* Save host state to the stack */ | |
76 | stdu r1, -SWITCH_FRAME_SIZE(r1) /* allocate switch frame; back-chain stored at 0(r1) */ | |
77 | ||
78 | /* Save r3 (kvm_run) and r4 (vcpu) */ | |
79 | SAVE_2GPRS(3, r1) | |
80 | ||
81 | /* Save non-volatile registers (r14 - r31) */ | |
82 | SAVE_NVGPRS(r1) | |
83 | ||
84 | /* Save LR */ | |
97c4cfbe AG |
85 | std r0, _LINK(r1) /* also keep LR in our own switch frame */ |
86 | ||
87 | /* Load non-volatile guest state from the vcpu */ | |
88 | VCPU_LOAD_NVGPRS(r4) | |
29eb61bc | 89 | 
29eb61bc | 90 | /* Save R1/R2 in the PACA */ |
7e57cba0 AG |
91 | std r1, PACA_KVM_HOST_R1(r13) /* r13 = per-CPU PACA */ |
92 | std r2, PACA_KVM_HOST_R2(r13) | |
93 | ||
94 | /* XXX swap in/out on load? */ | |
29eb61bc | 95 | ld r3, VCPU_HIGHMEM_HANDLER(r4) /* exit path: the lowmem trampoline jumps back via this */
7e57cba0 | 96 | std r3, PACA_KVM_VMHANDLER(r13)
29eb61bc | 97 | 
7e57cba0 | 98 | kvm_start_lightweight:
29eb61bc | 99 | 
7e57cba0 AG |
100 | ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ |
101 | ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ | |
29eb61bc | 102 | 
7e57cba0 | 103 | /* Load some guest state in the respective registers */ |
021ec9c6 AG |
104 | ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */ |
105 | /* will be swapped in by rmcall */ | |
29eb61bc AG |
106 | |
107 | ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ | |
108 | mtlr r3 /* LR = r3 */ | |
109 | ||
7e57cba0 | 110 | DISABLE_INTERRUPTS /* no external interrupts from here until we are in the guest */
29eb61bc AG |
111 | |
112 | /* Some guests may need to have dcbz set to 32 byte length. | |
113 | * | |
114 | * Usually we ensure that by patching the guest's instructions | |
115 | * to trap on dcbz and emulate it in the hypervisor. | |
116 | * | |
117 | * If we can, we should tell the CPU to use 32 byte dcbz though, | |
118 | * because that's a lot faster. | |
119 | */ | |
120 | ||
121 | ld r3, VCPU_HFLAGS(r4) | |
122 | rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */ | |
123 | beq no_dcbz32_on | |
124 | ||
125 | mfspr r3,SPRN_HID5 | |
126 | ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */ | |
127 | mtspr SPRN_HID5,r3 | |
128 | ||
129 | no_dcbz32_on: | |
29eb61bc | 130 | 
021ec9c6 AG |
131 | ld r6, VCPU_RMCALL(r4) /* real-mode call helper; target of the bctr below */ |
132 | mtctr r6 | |
7e57cba0 | 133 | 
021ec9c6 AG |
134 | ld r3, VCPU_TRAMPOLINE_ENTER(r4) /* r3 = entry trampoline address (argument to rmcall) */ |
135 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) /* r4 = kernel MSR with translation off (real mode) */ | |
7e57cba0 | 136 | 
29eb61bc | 137 | /* Jump to SLB patching handler and into our guest */ |
021ec9c6 | 138 | bctr
29eb61bc AG |
139 | |
140 | /* |
141 | * This is the handler in module memory. It gets jumped at from the | |
142 | * lowmem trampoline code, so it's basically the guest exit code. | |
143 | * | |
144 | */ | |
145 | ||
146 | .global kvmppc_handler_highmem | |
147 | kvmppc_handler_highmem: | |
148 | ||
149 | /* | |
150 | * Register usage at this point: | |
151 | * | |
7e57cba0 AG |
152 | * R0 = guest last inst |
153 | * R1 = host R1 | |
154 | * R2 = host R2 | |
155 | * R3 = guest PC | |
156 | * R4 = guest MSR | |
157 | * R5 = guest DAR | |
158 | * R6 = guest DSISR | |
159 | * R13 = PACA | |
160 | * PACA.KVM.* = guest * | |
29eb61bc AG |
161 | * |
162 | */ | |
163 | ||
7e57cba0 AG |
164 | /* R7 = vcpu */ |
165 | ld r7, GPR4(r1) /* reload vcpu pointer saved by SAVE_2GPRS at entry */ | |
29eb61bc | 166 | 
7e57cba0 | 167 | /* Now save the guest state */ |
29eb61bc | 168 | 
7e57cba0 | 169 | stw r0, VCPU_LAST_INST(r7) /* last guest instruction is 32 bits wide, hence stw */
29eb61bc | 170 | 
7e57cba0 | 171 | std r3, VCPU_PC(r7)
f7adbba1 | 172 | std r4, VCPU_SHADOW_SRR1(r7)
7e57cba0 | 173 | std r5, VCPU_FAULT_DEAR(r7)
c8027f16 | 174 | stw r6, VCPU_FAULT_DSISR(r7)
29eb61bc | 175 | 
7e57cba0 | 176 | ld r5, VCPU_HFLAGS(r7) /* same dcbz32 hflag as checked on entry */
29eb61bc AG |
177 | rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ |
178 | beq no_dcbz32_off | |
179 | ||
d35feb26 | 180 | li r4, 0
29eb61bc | 181 | mfspr r5,SPRN_HID5
d35feb26 | 182 | rldimi r5,r4,6,56 /* insert 0 over the dcbz32 enable bit, undoing no_dcbz32_on */
29eb61bc AG |
183 | mtspr SPRN_HID5,r5 |
184 | ||
185 | no_dcbz32_off: | |
186 | ||
7e57cba0 AG |
187 | std r14, VCPU_GPR(r14)(r7) /* save guest non-volatile GPRs back into the vcpu */ |
188 | std r15, VCPU_GPR(r15)(r7) | |
189 | std r16, VCPU_GPR(r16)(r7) | |
190 | std r17, VCPU_GPR(r17)(r7) | |
191 | std r18, VCPU_GPR(r18)(r7) | |
192 | std r19, VCPU_GPR(r19)(r7) | |
193 | std r20, VCPU_GPR(r20)(r7) | |
194 | std r21, VCPU_GPR(r21)(r7) | |
195 | std r22, VCPU_GPR(r22)(r7) | |
196 | std r23, VCPU_GPR(r23)(r7) | |
197 | std r24, VCPU_GPR(r24)(r7) | |
198 | std r25, VCPU_GPR(r25)(r7) | |
199 | std r26, VCPU_GPR(r26)(r7) | |
200 | std r27, VCPU_GPR(r27)(r7) | |
201 | std r28, VCPU_GPR(r28)(r7) | |
202 | std r29, VCPU_GPR(r29)(r7) | |
203 | std r30, VCPU_GPR(r30)(r7) | |
204 | std r31, VCPU_GPR(r31)(r7) | |
205 | ||
206 | /* Save guest CTR */ | |
29eb61bc | 207 | mfctr r5
7e57cba0 | 208 | std r5, VCPU_CTR(r7)
29eb61bc AG |
209 | |
210 | /* Save guest LR */ | |
211 | mflr r5 | |
7e57cba0 | 212 | std r5, VCPU_LR(r7)
29eb61bc | 213 | 
29eb61bc | 214 | /* Restore host msr -> SRR1 */ |
7e57cba0 | 215 | ld r6, VCPU_HOST_MSR(r7)
29eb61bc AG |
216 | |
217 | /* | |
218 | * For some interrupts, we need to call the real Linux | |
219 | * handler, so it can do work for us. This has to happen | |
220 | * as if the interrupt arrived from the kernel though, | |
221 | * so let's fake it here where most state is restored. | |
222 | * | |
223 | * Call Linux for hardware interrupts/decrementer | |
224 | * r3 = address of interrupt handler (exit reason) | |
225 | */ | |
226 | ||
7e57cba0 | 227 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
29eb61bc | 228 | beq call_linux_handler
7e57cba0 | 229 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
29eb61bc AG |
230 | beq call_linux_handler
231 | ||
bc90923e AG |
232 | /* Back to EE=1 */ |
233 | mtmsr r6 /* restore host MSR, re-enabling interrupts */ |
234 | b kvm_return_point | |
29eb61bc AG |
235 | |
236 | call_linux_handler: | |
237 | ||
238 | /* | |
239 | * If we land here we need to jump back to the handler we | |
240 | * came from. | |
241 | * | |
242 | * We have a page that we can access from real mode, so let's | |
243 | * jump back to that and use it as a trampoline to get back into the | |
244 | * interrupt handler! | |
245 | * | |
246 | * R3 still contains the exit code, | |
bc90923e AG |
247 | * R5 VCPU_HOST_RETIP and |
248 | * R6 VCPU_HOST_MSR | |
29eb61bc AG |
249 | */ |
250 | ||
bc90923e AG |
251 | /* Restore host IP -> SRR0 */ |
252 | ld r5, VCPU_HOST_RETIP(r7) | |
253 | ||
254 | /* XXX Better move to a safe function? | |
255 | * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ | |
256 | ||
7e57cba0 | 257 | mtlr r12 /* LR = r12 (interrupt/exit number) for the lowmem trampoline -- TODO confirm consumer */
29eb61bc | 258 | 
7e57cba0 AG |
259 | ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) |
260 | mtsrr0 r4 /* SRR0 = lowmem trampoline address */ | |
261 | LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | |
262 | mtsrr1 r3 /* SRR1 = kernel MSR with translation off (real mode) */ | |
29eb61bc AG |
263 | |
264 | RFI /* jump to the trampoline in real mode via SRR0/SRR1 */ |
265 | ||
266 | .global kvm_return_point | |
267 | kvm_return_point: | |
268 | ||
269 | /* Jump back to lightweight entry if we're supposed to */ | |
270 | /* go back into the guest */ | |
97c4cfbe AG |
271 | |
272 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | |
7e57cba0 | 273 | mr r5, r12
97c4cfbe | 274 | 
29eb61bc AG |
275 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
276 | REST_2GPRS(3, r1) | |
277 | bl KVMPPC_HANDLE_EXIT /* C call: (kvm_run, vcpu, exit nr); resume code returned in r3 */ | |
278 | ||
97c4cfbe | 279 | /* If RESUME_GUEST, get back in the loop */ |
29eb61bc | 280 | cmpwi r3, RESUME_GUEST
97c4cfbe | 281 | beq kvm_loop_lightweight
29eb61bc | 282 | 
97c4cfbe AG |
283 | cmpwi r3, RESUME_GUEST_NV /* NV variant also reloads the non-volatile guest regs */ |
284 | beq kvm_loop_heavyweight |
29eb61bc | 285 | 
97c4cfbe | 286 | kvm_exit_loop: |
29eb61bc | 287 | 
29eb61bc AG |
288 | ld r4, _LINK(r1) /* reload host LR saved at entry */ |
289 | mtlr r4 | |
290 | ||
97c4cfbe AG |
291 | /* Restore non-volatile host registers (r14 - r31) */ |
292 | REST_NVGPRS(r1) | |
293 | ||
294 | addi r1, r1, SWITCH_FRAME_SIZE /* pop the switch frame and return to the caller */ | |
295 | blr | |
296 | ||
297 | kvm_loop_heavyweight: | |
29eb61bc | 298 | 
97c4cfbe AG |
299 | ld r4, _LINK(r1) |
300 | std r4, (16 + SWITCH_FRAME_SIZE)(r1) /* re-save LR in the caller's LR slot above our frame */ | |
301 | ||
302 | /* Load vcpu and cpu_run */ | |
29eb61bc AG |
303 | REST_2GPRS(3, r1) |
304 | ||
97c4cfbe AG |
305 | /* Load non-volatile guest state from the vcpu */ |
306 | VCPU_LOAD_NVGPRS(r4) |
29eb61bc | 307 | 
97c4cfbe AG |
308 | /* Jump back into the beginning of this function */ |
309 | b kvm_start_lightweight |
29eb61bc | 310 | 
97c4cfbe | 311 | kvm_loop_lightweight: |
29eb61bc | 312 | 
97c4cfbe AG |
313 | /* We'll need the vcpu pointer */ |
314 | REST_GPR(4, r1) /* lightweight path: non-volatile guest regs are still live, only reload r4 */ |
315 | ||
316 | /* Jump back into the beginning of this function */ | |
317 | b kvm_start_lightweight |