/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)

#define VCPU_GPR(n)	(VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	4
#define HOST_RUN	8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2		12
#define HOST_NV_GPRS	16
#define HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + 4) /* In caller stack frame. */
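/* With r14..r31 saved, HOST_MIN_STACK_SIZE is HOST_NV_GPR(31) + 4 = 88 bytes,
 * which the expression above rounds up to a 96-byte (16-byte aligned) frame.
 * The LR save word lives at offset 4 of the caller's frame, hence
 * HOST_STACK_SIZE + 4. */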
43 | ||
44 | #define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \ | |
6a0ab738 HB |
45 | (1<<BOOKE_INTERRUPT_DTLB_MISS) | \ |
46 | (1<<BOOKE_INTERRUPT_DEBUG)) | |
bbf45ba5 HB |
47 | |
48 | #define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ | |
49 | (1<<BOOKE_INTERRUPT_DTLB_MISS)) | |
50 | ||
51 | #define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \ | |
52 | (1<<BOOKE_INTERRUPT_INST_STORAGE) | \ | |
53 | (1<<BOOKE_INTERRUPT_PROGRAM) | \ | |
54 | (1<<BOOKE_INTERRUPT_DTLB_MISS)) | |
55 | ||
.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
	/* Get pointer to vcpu and record exit number. */
	mtspr	SPRN_SPRG0, r4
	mfspr	r4, SPRN_SPRG1
	stw	r5, VCPU_GPR(r5)(r4)
	stw	r6, VCPU_GPR(r6)(r4)
	mfctr	r5
	lis	r6, kvmppc_resume_host@h
	stw	r5, VCPU_CTR(r4)
	li	r5, \ivor_nr
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	bctr
.endm
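/* Each stub saves guest r4 in SPRG0, fetches the vcpu pointer left in SPRG1,
 * frees up r5, r6 and CTR by saving them into the vcpu, loads its interrupt
 * number into r5, and branches to kvmppc_resume_host via CTR. */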
71 | ||
72 | _GLOBAL(kvmppc_handlers_start) | |
73 | KVM_HANDLER BOOKE_INTERRUPT_CRITICAL | |
74 | KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK | |
75 | KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE | |
76 | KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE | |
77 | KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL | |
78 | KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT | |
79 | KVM_HANDLER BOOKE_INTERRUPT_PROGRAM | |
80 | KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL | |
81 | KVM_HANDLER BOOKE_INTERRUPT_SYSCALL | |
82 | KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL | |
83 | KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER | |
84 | KVM_HANDLER BOOKE_INTERRUPT_FIT | |
85 | KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG | |
86 | KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS | |
87 | KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS | |
88 | KVM_HANDLER BOOKE_INTERRUPT_DEBUG | |
89 | ||
_GLOBAL(kvmppc_handler_len)
	.long kvmppc_handler_1 - kvmppc_handler_0


/* Registers:
 * SPRG0: guest r4
 * r4: vcpu pointer
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	stw	r3, VCPU_GPR(r3)(r4)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(r7)(r4)
	stw	r8, VCPU_GPR(r8)(r4)
	stw	r9, VCPU_GPR(r9)(r4)

	li	r6, 1
	slw	r6, r6, r5

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
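	/* The faulting instruction is in the guest (TS=1) address space, so
	 * temporarily set MSR[DS] to fetch it through the guest's data
	 * translation, then restore the original MSR. */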
	mfspr	r9, SPRN_SRR0
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7
	isync
	lwz	r9, 0(r9)
	mtmsr	r8
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:
141 | ||
142 | /* Also grab DEAR and ESR before the host can clobber them. */ | |
143 | ||
144 | andi. r7, r6, NEED_DEAR_MASK | |
145 | beq ..skip_dear | |
146 | mfspr r9, SPRN_DEAR | |
147 | stw r9, VCPU_FAULT_DEAR(r4) | |
148 | ..skip_dear: | |
149 | ||
150 | andi. r7, r6, NEED_ESR_MASK | |
151 | beq ..skip_esr | |
152 | mfspr r9, SPRN_ESR | |
153 | stw r9, VCPU_FAULT_ESR(r4) | |
154 | ..skip_esr: | |
155 | ||
156 | /* Save remaining volatile guest register state to vcpu. */ | |
157 | stw r0, VCPU_GPR(r0)(r4) | |
158 | stw r1, VCPU_GPR(r1)(r4) | |
159 | stw r2, VCPU_GPR(r2)(r4) | |
160 | stw r10, VCPU_GPR(r10)(r4) | |
161 | stw r11, VCPU_GPR(r11)(r4) | |
162 | stw r12, VCPU_GPR(r12)(r4) | |
163 | stw r13, VCPU_GPR(r13)(r4) | |
164 | stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */ | |
165 | mflr r3 | |
166 | stw r3, VCPU_LR(r4) | |
167 | mfxer r3 | |
168 | stw r3, VCPU_XER(r4) | |
169 | mfspr r3, SPRN_SPRG0 | |
170 | stw r3, VCPU_GPR(r4)(r4) | |
171 | mfspr r3, SPRN_SRR0 | |
172 | stw r3, VCPU_PC(r4) | |
173 | ||
	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(r14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
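	/* The low two bits of the kvmppc_handle_exit() return value are the
	 * RESUME_FLAG_* bits; the remainder is the (possibly negative) value
	 * to hand back to kvm_vcpu_run(), shifted left by two. */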
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(r14)(r1)
	lwz	r15, HOST_NV_GPR(r15)(r1)
	lwz	r16, HOST_NV_GPR(r16)(r1)
	lwz	r17, HOST_NV_GPR(r17)(r1)
	lwz	r18, HOST_NV_GPR(r18)(r1)
	lwz	r19, HOST_NV_GPR(r19)(r1)
	lwz	r20, HOST_NV_GPR(r20)(r1)
	lwz	r21, HOST_NV_GPR(r21)(r1)
	lwz	r22, HOST_NV_GPR(r22)(r1)
	lwz	r23, HOST_NV_GPR(r23)(r1)
	lwz	r24, HOST_NV_GPR(r24)(r1)
	lwz	r25, HOST_NV_GPR(r25)(r1)
	lwz	r26, HOST_NV_GPR(r26)(r1)
	lwz	r27, HOST_NV_GPR(r27)(r1)
	lwz	r28, HOST_NV_GPR(r28)(r1)
	lwz	r29, HOST_NV_GPR(r29)(r1)
	lwz	r30, HOST_NV_GPR(r30)(r1)
	lwz	r31, HOST_NV_GPR(r31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr


/* Registers:
 * r3: kvm_run pointer
 * r4: vcpu pointer
 */
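/* Entered from the host (kvm_vcpu_run()): allocate the host stack frame, save
 * host nonvolatile state, load the guest nonvolatiles, then fall through to
 * lightweight_exit to enter the guest. */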
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(r14)(r1)
	stw	r15, HOST_NV_GPR(r15)(r1)
	stw	r16, HOST_NV_GPR(r16)(r1)
	stw	r17, HOST_NV_GPR(r17)(r1)
	stw	r18, HOST_NV_GPR(r18)(r1)
	stw	r19, HOST_NV_GPR(r19)(r1)
	stw	r20, HOST_NV_GPR(r20)(r1)
	stw	r21, HOST_NV_GPR(r21)(r1)
	stw	r22, HOST_NV_GPR(r22)(r1)
	stw	r23, HOST_NV_GPR(r23)(r1)
	stw	r24, HOST_NV_GPR(r24)(r1)
	stw	r25, HOST_NV_GPR(r25)(r1)
	stw	r26, HOST_NV_GPR(r26)(r1)
	stw	r27, HOST_NV_GPR(r27)(r1)
	stw	r28, HOST_NV_GPR(r28)(r1)
	stw	r29, HOST_NV_GPR(r29)(r1)
	stw	r30, HOST_NV_GPR(r30)(r1)
	stw	r31, HOST_NV_GPR(r31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(r14)(r4)
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)

lightweight_exit:
	stw	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

	/* Prevent all asynchronous TLB updates. */
	mfmsr	r5
	lis	r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
	ori	r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6, r5, r6
	mtmsr	r6

	/* Load the guest mappings, leaving the host's "pinned" kernel mappings
	 * in place. */
	mfspr	r10, SPRN_MMUCR	/* Save host MMUCR. */
	li	r5, PPC44x_TLB_SIZE
	lis	r5, tlb_44x_hwater@ha
	lwz	r5, tlb_44x_hwater@l(r5)
	mtctr	r5
	addi	r9, r4, VCPU_SHADOW_TLB
	addi	r5, r4, VCPU_SHADOW_MOD
	li	r3, 0
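	/* For each TLB index below the hwater mark, write the four-word shadow
	 * entry (MMUCR, PAGEID, XLAT, ATTRIB) into the hardware TLB, but only
	 * if its flag byte in the VCPU_SHADOW_MOD array says it was modified. */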
1:
	lbzx	r7, r3, r5
	cmpwi	r7, 0
	beq	3f

	/* Load guest entry. */
	mulli	r11, r3, TLBE_BYTES
	add	r11, r11, r9
	lwz	r7, 0(r11)
	mtspr	SPRN_MMUCR, r7
	lwz	r7, 4(r11)
	tlbwe	r7, r3, PPC44x_TLB_PAGEID
	lwz	r7, 8(r11)
	tlbwe	r7, r3, PPC44x_TLB_XLAT
	lwz	r7, 12(r11)
	tlbwe	r7, r3, PPC44x_TLB_ATTRIB
3:
	addi	r3, r3, 1 /* Increment index. */
	bdnz	1b

	mtspr	SPRN_MMUCR, r10	/* Restore host MMUCR. */

	/* Clear bitmap of modified TLB entries */
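	/* One word store covers four one-byte entry flags, so PPC44x_TLB_SIZE>>2
	 * stores of zero clear the whole array. */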
	li	r5, PPC44x_TLB_SIZE>>2
	mtctr	r5
	addi	r5, r4, VCPU_SHADOW_MOD - 4
	li	r6, 0
1:
	stwu	r6, 4(r5)
	bdnz	1b

	iccci	0, 0 /* XXX hack */

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(r0)(r4)
	lwz	r2, VCPU_GPR(r2)(r4)
	lwz	r9, VCPU_GPR(r9)(r4)
	lwz	r10, VCPU_GPR(r10)(r4)
	lwz	r11, VCPU_GPR(r11)(r4)
	lwz	r12, VCPU_GPR(r12)(r4)
	lwz	r13, VCPU_GPR(r13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	/* Save vcpu pointer for the exception handlers. */
	mtspr	SPRN_SPRG1, r4

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(r1)(r4)

	/* XXX handle USPRG0 */
	/* Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values. */
	lwz	r3, VCPU_SPRG4(r4)
	mtspr	SPRN_SPRG4, r3
	lwz	r3, VCPU_SPRG5(r4)
	mtspr	SPRN_SPRG5, r3
	lwz	r3, VCPU_SPRG6(r4)
	mtspr	SPRN_SPRG6, r3
	lwz	r3, VCPU_SPRG7(r4)
	mtspr	SPRN_SPRG7, r3

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	mtctr	r3
	lwz	r3, VCPU_CR(r4)
	mtcr	r3
	lwz	r5, VCPU_GPR(r5)(r4)
	lwz	r6, VCPU_GPR(r6)(r4)
	lwz	r7, VCPU_GPR(r7)(r4)
	lwz	r8, VCPU_GPR(r8)(r4)
	lwz	r3, VCPU_PC(r4)
	mtsrr0	r3
	lwz	r3, VCPU_MSR(r4)
	oris	r3, r3, KVMPPC_MSR_MASK@h
	ori	r3, r3, KVMPPC_MSR_MASK@l
	mtsrr1	r3

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(r3)(r4)
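	/* Guest r4 is loaded last because r4 has held the vcpu pointer until
	 * now. */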
	lwz	r4, VCPU_GPR(r4)(r4)
	rfi