/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

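/* KVMPPC_HANDLE_EXIT is the ELFv1 text entry point (the "dot" symbol) of
 * kvmppc_handle_exit(); VCPU_GPR(n) computes the offset of GPR n inside
 * the vcpu struct, with ULONG_SIZE == sizeof(unsigned long) on ppc64 */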
#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
#define ULONG_SIZE 8
#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))

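/* Copy guest register \src_reg, which the real mode handler stashed at
 * PACA_EXMC + \offset, into the vcpu struct; \tmp_reg is clobbered */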
.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
	ld	\tmp_reg, (PACA_EXMC+\offset)(r13)
	std	\tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
.endm

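/* Hard-disable interrupts: rotate the MSR left by 48 so that MSR_EE
 * becomes the top bit, clear it with rldicl, rotate the remaining 16
 * bits back into place and write the result with mtmsrd */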
.macro DISABLE_INTERRUPTS
	mfmsr	r0
	rldicl	r0,r0,48,1
	rotldi	r0,r0,16
	mtmsrd	r0,1
.endm

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)    *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	std	r0,16(r1)

	/* Save host state to the stack */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	mflr	r14
	std	r14, _LINK(r1)

	/* XXX optimize non-volatile loading away */
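	/* kvm_start_lightweight is meant as a re-entry point that skips
	 * the host state save above (see the disabled lightweight exit
	 * path at the bottom of this file) */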
kvm_start_lightweight:

	DISABLE_INTERRUPTS

	/* Save R1/R2 in the PACA (R2 goes into the EX_SRR0 scratch slot) */
	std	r1, PACAR1(r13)
	std	r2, (PACA_EXMC+EX_SRR0)(r13)

	/* Stash the highmem exit handler so the lowmem trampoline can
	 * jump back to it after the guest exits */
	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
	std	r3, PACASAVEDMSR(r13)

	/* Load non-volatile guest state from the vcpu */
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */

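	/* Set up an RFI into the guest: SRR0 points at the real mode
	 * entry trampoline, SRR1 is the kernel MSR with the MMU
	 * (IR/DR) turned off */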
	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
	mtsrr0	r3

	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3

	/* Load guest state in the respective registers */
	lwz	r3, VCPU_CR(r4)		/* r3 = vcpu->arch.cr */
	stw	r3, (PACA_EXMC + EX_CCR)(r13)

	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
	mtctr	r3			/* CTR = r3 */

	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
	mtlr	r3			/* LR = r3 */

	ld	r3, VCPU_XER(r4)	/* r3 = vcpu->arch.xer */
	std	r3, (PACA_EXMC + EX_R3)(r13)

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */

	ld	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	mfspr	r3,SPRN_HID5
	ori	r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5,r3

no_dcbz32_on:
	/* Load guest GPRs */

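	/* Guest r9-r13 are staged in the PACA so the trampoline, which
	 * still needs scratch registers of its own, can restore them
	 * last */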
	ld	r3, VCPU_GPR(r9)(r4)
	std	r3, (PACA_EXMC + EX_R9)(r13)
	ld	r3, VCPU_GPR(r10)(r4)
	std	r3, (PACA_EXMC + EX_R10)(r13)
	ld	r3, VCPU_GPR(r11)(r4)
	std	r3, (PACA_EXMC + EX_R11)(r13)
	ld	r3, VCPU_GPR(r12)(r4)
	std	r3, (PACA_EXMC + EX_R12)(r13)
	ld	r3, VCPU_GPR(r13)(r4)
	std	r3, (PACA_EXMC + EX_R13)(r13)

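	/* Load the remaining guest GPRs; r4 is the vcpu pointer, so it
	 * has to go last */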
	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r4, VCPU_GPR(r4)(r4)

	/* This sets the magic value for the trampoline */

	li	r11, 1
	stb	r11, PACA_KVM_IN_GUEST(r13)

	/* Jump to the SLB patching handler and into our guest */
	RFI

/*
 * This is the handler in module memory. It gets jumped to from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *
	 *   R00   = guest R13
	 *   R01   = host R1
	 *   R02   = host R2
	 *   R10   = guest PC
	 *   R11   = guest MSR
	 *   R12   = exit handler id
	 *   R13   = PACA
	 *   PACA.exmc.R9    = guest R1
	 *   PACA.exmc.R10   = guest R10
	 *   PACA.exmc.R11   = guest R11
	 *   PACA.exmc.R12   = guest R12
	 *   PACA.exmc.R13   = guest R2
	 *   PACA.exmc.DAR   = guest DAR
	 *   PACA.exmc.DSISR = guest DSISR
	 *   PACA.exmc.LR    = guest instruction
	 *   PACA.exmc.CCR   = guest CR
	 *   PACA.exmc.SRR0  = guest R0
	 */

	std	r3, (PACA_EXMC+EX_R3)(r13)

	/* save the exit id in R3 */
	mr	r3, r12

	/* R12 = vcpu */
	ld	r12, GPR4(r1)

	/* Now save the guest state */

	std	r0, VCPU_GPR(r13)(r12)	/* r0 holds guest R13, see above */
	std	r4, VCPU_GPR(r4)(r12)
	std	r5, VCPU_GPR(r5)(r12)
	std	r6, VCPU_GPR(r6)(r12)
	std	r7, VCPU_GPR(r7)(r12)
	std	r8, VCPU_GPR(r8)(r12)
	std	r9, VCPU_GPR(r9)(r12)

	/* get registers from PACA */
	mfpaca	r5, r0, EX_SRR0, r12
	mfpaca	r5, r3, EX_R3, r12
	mfpaca	r5, r1, EX_R9, r12
	mfpaca	r5, r10, EX_R10, r12
	mfpaca	r5, r11, EX_R11, r12
	mfpaca	r5, r12, EX_R12, r12
	mfpaca	r5, r2, EX_R13, r12

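	/* exmc.LR and exmc.CCR hold the guest's last instruction and CR
	 * (see the register map above) */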
	lwz	r5, (PACA_EXMC+EX_LR)(r13)
	stw	r5, VCPU_LAST_INST(r12)

	lwz	r5, (PACA_EXMC+EX_CCR)(r13)
	stw	r5, VCPU_CR(r12)

	ld	r5, VCPU_HFLAGS(r12)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	mfspr	r5,SPRN_HID5
	rldimi	r5,r5,6,56		/* turn 32 byte dcbz off again */
	mtspr	SPRN_HID5,r5

no_dcbz32_off:

	/* XXX maybe skip on lightweight? */
	std	r14, VCPU_GPR(r14)(r12)
	std	r15, VCPU_GPR(r15)(r12)
	std	r16, VCPU_GPR(r16)(r12)
	std	r17, VCPU_GPR(r17)(r12)
	std	r18, VCPU_GPR(r18)(r12)
	std	r19, VCPU_GPR(r19)(r12)
	std	r20, VCPU_GPR(r20)(r12)
	std	r21, VCPU_GPR(r21)(r12)
	std	r22, VCPU_GPR(r22)(r12)
	std	r23, VCPU_GPR(r23)(r12)
	std	r24, VCPU_GPR(r24)(r12)
	std	r25, VCPU_GPR(r25)(r12)
	std	r26, VCPU_GPR(r26)(r12)
	std	r27, VCPU_GPR(r27)(r12)
	std	r28, VCPU_GPR(r28)(r12)
	std	r29, VCPU_GPR(r29)(r12)
	std	r30, VCPU_GPR(r30)(r12)
	std	r31, VCPU_GPR(r31)(r12)

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	/* Save guest PC (R10) */
	std	r10, VCPU_PC(r12)

	/* Save guest MSR (R11) */
	std	r11, VCPU_SHADOW_MSR(r12)

	/* Save guest CTR */
	mfctr	r5
	std	r5, VCPU_CTR(r12)

	/* Save guest LR */
	mflr	r5
	std	r5, VCPU_LR(r12)

	/* Save guest XER */
	mfxer	r5
	std	r5, VCPU_XER(r12)

	/* Save guest DAR */
	ld	r5, (PACA_EXMC+EX_DAR)(r13)
	std	r5, VCPU_FAULT_DEAR(r12)

	/* Save guest DSISR */
	lwz	r5, (PACA_EXMC+EX_DSISR)(r13)
	std	r5, VCPU_FAULT_DSISR(r12)

	/* Restore host MSR -> SRR1 */
	ld	r7, VCPU_HOST_MSR(r12)
	mtsrr1	r7

	/* Restore host IP -> SRR0 */
	ld	r6, VCPU_HOST_RETIP(r12)
	mtsrr0	r6

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer.
	 * r3 = address of interrupt handler (exit reason)
	 */

	cmpwi	r3, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r3, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler

	/* Back to interruptible mode! (goto kvm_return_point) */
	RFI

call_linux_handler:

	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into
	 * the interrupt handler!
	 *
	 * R3 still contains the exit code,
	 * R6 VCPU_HOST_RETIP and
	 * R7 VCPU_HOST_MSR.
	 */

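	/* The exit code in R3 doubles as the vector address of the Linux
	 * handler, so the lowmem trampoline can branch to it via LR */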
	mtlr	r3

	ld	r5, VCPU_TRAMPOLINE_LOWMEM(r12)
	mtsrr0	r5
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r5

	RFI

.global kvm_return_point
kvm_return_point:

	/* Jump back to the lightweight entry if we are supposed to
	 * go back into the guest */
	mr	r5, r3		/* r5 = exit number, third argument */
	/* Restore r3 (kvm_run) and r4 (vcpu), the first two arguments */
	REST_2GPRS(3, r1)
	bl	KVMPPC_HANDLE_EXIT

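	/* The disabled variant below would resume the guest directly on
	 * RESUME_GUEST (a real lightweight exit); the active variant
	 * unwinds the stack frame and reruns the full entry path */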
#if 0 /* XXX get lightweight exits back */
	cmpwi	r3, RESUME_GUEST
	bne	kvm_exit_heavyweight

	/* put VCPU and KVM_RUN back into place and roll again! */
	REST_2GPRS(3, r1)
	b	kvm_start_lightweight

kvm_exit_heavyweight:
	/* Restore non-volatile host registers */
	ld	r14, _LINK(r1)
	mtlr	r14
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
#else
	ld	r4, _LINK(r1)
	mtlr	r4

	cmpwi	r3, RESUME_GUEST
	bne	kvm_exit_heavyweight

	REST_2GPRS(3, r1)

	addi	r1, r1, SWITCH_FRAME_SIZE

	b	kvm_start_entry

kvm_exit_heavyweight:

	addi	r1, r1, SWITCH_FRAME_SIZE
#endif

	blr