/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
19 | ||
20 | /* Real mode helpers */ | |
21 | ||
22 | #if defined(CONFIG_PPC_BOOK3S_64) | |
23 | ||
24 | #define GET_SHADOW_VCPU(reg) \ | |
3c42bf8a | 25 | mr reg, r13 |
02143947 | 26 | #define MTMSR_EERI(reg) mtmsrd (reg),1 |
07372794 AG |
27 | |
28 | #elif defined(CONFIG_PPC_BOOK3S_32) | |
29 | ||
30 | #define GET_SHADOW_VCPU(reg) \ | |
31 | tophys(reg, r2); \ | |
32 | lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \ | |
33 | tophys(reg, reg) | |
02143947 | 34 | #define MTMSR_EERI(reg) mtmsr (reg) |
07372794 AG |
35 | |
36 | #endif | |
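/*
 * Note on MTMSR_EERI: on 64-bit Book3S, mtmsrd with L=1 updates only
 * MSR[EE] and MSR[RI], which is all this code needs to change while the
 * MMU is off; 32-bit has no partial-update form, so plain mtmsr is used.
 */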
37 | ||
38 | /* Disable for nested KVM */ | |
39 | #define USE_QUICK_LAST_INST | |
40 | ||
41 | ||
42 | /* Get helper functions for subarch specific functionality */ | |
43 | ||
44 | #if defined(CONFIG_PPC_BOOK3S_64) | |
45 | #include "book3s_64_slb.S" | |
46 | #elif defined(CONFIG_PPC_BOOK3S_32) | |
47 | #include "book3s_32_sr.S" | |
48 | #endif | |
49 | ||
50 | /****************************************************************************** | |
51 | * * | |
52 | * Entry code * | |
53 | * * | |
54 | *****************************************************************************/ | |
55 | ||
56 | .global kvmppc_handler_trampoline_enter | |
57 | kvmppc_handler_trampoline_enter: | |
58 | ||
59 | /* Required state: | |
60 | * | |
61 | * MSR = ~IR|DR | |
07372794 AG |
62 | * R1 = host R1 |
63 | * R2 = host R2 | |
02143947 PM |
64 | * R4 = guest shadow MSR |
65 | * R5 = normal host MSR | |
66 | * R6 = current host MSR (EE, IR, DR off) | |
67 | * LR = highmem guest exit code | |
07372794 AG |
68 | * all other volatile GPRS = free |
69 | * SVCPU[CR] = guest CR | |
70 | * SVCPU[XER] = guest XER | |
71 | * SVCPU[CTR] = guest CTR | |
72 | * SVCPU[LR] = guest LR | |
73 | */ | |
74 | ||
75 | /* r3 = shadow vcpu */ | |
76 | GET_SHADOW_VCPU(r3) | |
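	/*
	 * r3 now points at the shadow vcpu state: on 64-bit this is the PACA
	 * (which holds the HSTATE_* fields), on 32-bit it is the real-mode
	 * address of the shadow vcpu reached through the thread struct, as
	 * set up by GET_SHADOW_VCPU above.
	 */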
77 | ||
02143947 PM |
78 | /* Save guest exit handler address and MSR */ |
79 | mflr r0 | |
80 | PPC_STL r0, HSTATE_VMHANDLER(r3) | |
81 | PPC_STL r5, HSTATE_HOST_MSR(r3) | |
82 | ||
3c42bf8a PM |
83 | /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */ |
84 | PPC_STL r1, HSTATE_HOST_R1(r3) | |
85 | PPC_STL r2, HSTATE_HOST_R2(r3) | |
86 | ||
07372794 AG |
87 | /* Activate guest mode, so faults get handled by KVM */ |
88 | li r11, KVM_GUEST_MODE_GUEST | |
3c42bf8a | 89 | stb r11, HSTATE_IN_GUEST(r3) |
07372794 AG |
90 | |
91 | /* Switch to guest segment. This is subarch specific. */ | |
92 | LOAD_GUEST_SEGMENTS | |
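	/*
	 * LOAD_GUEST_SEGMENTS (and LOAD_HOST_SEGMENTS on the exit path) come
	 * from the subarch include above: book3s_64_slb.S switches the SLB,
	 * book3s_32_sr.S switches the segment registers.
	 */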
93 | ||
02143947 PM |
94 | #ifdef CONFIG_PPC_BOOK3S_64 |
95 | /* Some guests may need to have dcbz set to 32 byte length. | |
96 | * | |
97 | * Usually we ensure that by patching the guest's instructions | |
98 | * to trap on dcbz and emulate it in the hypervisor. | |
99 | * | |
100 | * If we can, we should tell the CPU to use 32 byte dcbz though, | |
101 | * because that's a lot faster. | |
102 | */ | |
103 | lbz r0, HSTATE_RESTORE_HID5(r3) | |
104 | cmpwi r0, 0 | |
105 | beq no_dcbz32_on | |
106 | ||
107 | mfspr r0,SPRN_HID5 | |
108 | ori r0, r0, 0x80 /* XXX HID5_dcbz32 = 0x80 */ | |
109 | mtspr SPRN_HID5,r0 | |
110 | no_dcbz32_on: | |
111 | ||
112 | #endif /* CONFIG_PPC_BOOK3S_64 */ | |
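	/*
	 * (HID5_dcbz32 is a PPC970-family control: with it set, dcbz clears
	 * 32 bytes rather than a full cache line, which is what guests built
	 * for 32-byte cache lines expect.)
	 */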
113 | ||
07372794 AG |
114 | /* Enter guest */ |
115 | ||
02143947 PM |
116 | PPC_LL r8, SVCPU_CTR(r3) |
117 | PPC_LL r9, SVCPU_LR(r3) | |
118 | lwz r10, SVCPU_CR(r3) | |
119 | lwz r11, SVCPU_XER(r3) | |
120 | ||
121 | mtctr r8 | |
122 | mtlr r9 | |
123 | mtcr r10 | |
124 | mtxer r11 | |
07372794 | 125 | |
02143947 PM |
126 | /* Move SRR0 and SRR1 into the respective regs */ |
127 | PPC_LL r9, SVCPU_PC(r3) | |
128 | /* First clear RI in our current MSR value */ | |
129 | li r0, MSR_RI | |
130 | andc r6, r6, r0 | |
131 | MTMSR_EERI(r6) | |
132 | mtsrr0 r9 | |
133 | mtsrr1 r4 | |
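	/*
	 * From here on SRR0/SRR1 hold the guest PC and guest shadow MSR
	 * rather than anything the host could resume from, which is why
	 * MSR[RI] was cleared above: an interrupt taken now would not be
	 * recoverable.
	 */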

	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	PPC_LL	r3, (SVCPU_R3)(r3)

	RFI
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

.global kvmppc_interrupt
kvmppc_interrupt:

	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0 = guest R13
	 * R12 = exit handler id
	 * R13 = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 *
	 */
177 | ||
178 | /* Save registers */ | |
179 | ||
3c42bf8a PM |
180 | PPC_STL r0, SVCPU_R0(r13) |
181 | PPC_STL r1, SVCPU_R1(r13) | |
182 | PPC_STL r2, SVCPU_R2(r13) | |
183 | PPC_STL r3, SVCPU_R3(r13) | |
184 | PPC_STL r4, SVCPU_R4(r13) | |
185 | PPC_STL r5, SVCPU_R5(r13) | |
186 | PPC_STL r6, SVCPU_R6(r13) | |
187 | PPC_STL r7, SVCPU_R7(r13) | |
188 | PPC_STL r8, SVCPU_R8(r13) | |
189 | PPC_STL r9, SVCPU_R9(r13) | |
190 | PPC_STL r10, SVCPU_R10(r13) | |
191 | PPC_STL r11, SVCPU_R11(r13) | |
07372794 AG |
192 | |
193 | /* Restore R1/R2 so we can handle faults */ | |
3c42bf8a PM |
194 | PPC_LL r1, HSTATE_HOST_R1(r13) |
195 | PPC_LL r2, HSTATE_HOST_R2(r13) | |
07372794 AG |
196 | |
197 | /* Save guest PC and MSR */ | |
b01c8b54 PM |
198 | #ifdef CONFIG_PPC64 |
199 | BEGIN_FTR_SECTION | |
56e13dba | 200 | mr r10, r12 |
a5d4f3ad BH |
201 | andi. r0,r12,0x2 |
202 | beq 1f | |
203 | mfspr r3,SPRN_HSRR0 | |
204 | mfspr r4,SPRN_HSRR1 | |
205 | andi. r12,r12,0x3ffd | |
206 | b 2f | |
969391c5 | 207 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
b01c8b54 | 208 | #endif |
a5d4f3ad | 209 | 1: mfsrr0 r3 |
07372794 | 210 | mfsrr1 r4 |
a5d4f3ad | 211 | 2: |
3c42bf8a PM |
212 | PPC_STL r3, SVCPU_PC(r13) |
213 | PPC_STL r4, SVCPU_SHADOW_SRR1(r13) | |
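	/*
	 * The feature section above handles hypervisor-level interrupts on
	 * HV-capable CPUs: those carry bit 0x2 in the exit handler id and are
	 * delivered via HSRR0/HSRR1 rather than SRR0/SRR1, so the guest PC
	 * and MSR are read from there and the flag bit is masked off again
	 * (andi. with 0x3ffd) before r12 is used as a plain handler id.
	 */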

	/* Get scratch'ed off registers */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)
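	/*
	 * These three could not be saved directly: the first-level interrupt
	 * code stashed guest r13 in SPRG_SCRATCH0 and guest r12/CR in HSTATE
	 * before branching here (see the register usage comment above), so
	 * they are recovered from there into the shadow vcpu.
	 */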

	/* Save more register state */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	stw	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * To easily fetch the instruction that the #vmexit occurred at,
	 * we exploit the fact that the virtual address layout is still
	 * the same here, so we can simply load from the guest's PC
	 * address.
	 */
244 | ||
245 | /* We only load the last instruction when it's safe */ | |
246 | cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE | |
247 | beq ld_last_inst | |
248 | cmpwi r12, BOOK3S_INTERRUPT_PROGRAM | |
249 | beq ld_last_inst | |
77e675ad AG |
250 | cmpwi r12, BOOK3S_INTERRUPT_SYSCALL |
251 | beq ld_last_prev_inst | |
6fc55825 AG |
252 | cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT |
253 | beq- ld_last_inst | |
7ef4e985 AG |
254 | #ifdef CONFIG_PPC64 |
255 | BEGIN_FTR_SECTION | |
256 | cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST | |
257 | beq- ld_last_inst | |
258 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | |
259 | #endif | |
07372794 AG |
260 | |
261 | b no_ld_last_inst | |
262 | ||
77e675ad AG |
263 | ld_last_prev_inst: |
264 | addi r3, r3, -4 | |
265 | ||
07372794 AG |
266 | ld_last_inst: |
267 | /* Save off the guest instruction we're at */ | |
268 | ||
269 | /* In case lwz faults */ | |
270 | li r0, KVM_INST_FETCH_FAILED | |
271 | ||
272 | #ifdef USE_QUICK_LAST_INST | |
273 | ||
274 | /* Set guest mode to 'jump over instruction' so if lwz faults | |
275 | * we'll just continue at the next IP. */ | |
276 | li r9, KVM_GUEST_MODE_SKIP | |
3c42bf8a | 277 | stb r9, HSTATE_IN_GUEST(r13) |
07372794 AG |
278 | |
279 | /* 1) enable paging for data */ | |
280 | mfmsr r9 | |
281 | ori r11, r9, MSR_DR /* Enable paging for data */ | |
282 | mtmsr r11 | |
283 | sync | |
284 | /* 2) fetch the instruction */ | |
285 | lwz r0, 0(r3) | |
286 | /* 3) disable paging again */ | |
287 | mtmsr r9 | |
288 | sync | |
289 | ||
290 | #endif | |
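	/*
	 * If the lwz above faulted, the host fault handler sees
	 * KVM_GUEST_MODE_SKIP and resumes at the next instruction, so r0
	 * still holds KVM_INST_FETCH_FAILED and that is what gets recorded
	 * as the last instruction.
	 */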
	stw	r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

	lbz	r5, HSTATE_RESTORE_HID5(r13)
	cmpwi	r5, 0
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5,SPRN_HID5
	rldimi	r5,r4,6,56
	mtspr	SPRN_HID5,r5
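	/* rldimi inserts zero bits from r4, clearing the dcbz32 bit again */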
312 | ||
313 | no_dcbz32_off: | |
314 | ||
315 | #endif /* CONFIG_PPC_BOOK3S_64 */ | |
316 | ||
317 | /* | |
318 | * For some interrupts, we need to call the real Linux | |
319 | * handler, so it can do work for us. This has to happen | |
320 | * as if the interrupt arrived from the kernel though, | |
321 | * so let's fake it here where most state is restored. | |
322 | * | |
323 | * Having set up SRR0/1 with the address where we want | |
324 | * to continue with relocation on (potentially in module | |
325 | * space), we either just go straight there with rfi[d], | |
56e13dba AG |
326 | * or we jump to an interrupt handler if there is an |
327 | * interrupt to be handled first. In the latter case, | |
328 | * the rfi[d] at the end of the interrupt handler will | |
329 | * get us back to where we want to continue. | |
02143947 PM |
330 | */ |
331 | ||
07372794 AG |
332 | /* Register usage at this point: |
333 | * | |
334 | * R1 = host R1 | |
335 | * R2 = host R2 | |
56e13dba | 336 | * R10 = raw exit handler id |
07372794 | 337 | * R12 = exit handler id |
3c42bf8a | 338 | * R13 = shadow vcpu (32-bit) or PACA (64-bit) |
07372794 AG |
339 | * SVCPU.* = guest * |
340 | * | |
341 | */ | |
342 | ||
02143947 | 343 | PPC_LL r6, HSTATE_HOST_MSR(r13) |
3c42bf8a | 344 | PPC_LL r8, HSTATE_VMHANDLER(r13) |
02143947 | 345 | |
56e13dba AG |
346 | #ifdef CONFIG_PPC64 |
347 | BEGIN_FTR_SECTION | |
348 | andi. r0,r10,0x2 | |
349 | beq 1f | |
350 | mtspr SPRN_HSRR1, r6 | |
351 | mtspr SPRN_HSRR0, r8 | |
352 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | |
353 | #endif | |
354 | 1: /* Restore host msr -> SRR1 */ | |
02143947 PM |
355 | mtsrr1 r6 |
356 | /* Load highmem handler address */ | |
07372794 AG |
357 | mtsrr0 r8 |
358 | ||
02143947 | 359 | /* RFI into the highmem handler, or jump to interrupt handler */ |
56e13dba AG |
360 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL |
361 | beqa BOOK3S_INTERRUPT_EXTERNAL | |
362 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER | |
363 | beqa BOOK3S_INTERRUPT_DECREMENTER | |
364 | cmpwi r12, BOOK3S_INTERRUPT_PERFMON | |
365 | beqa BOOK3S_INTERRUPT_PERFMON | |
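	/*
	 * beqa branches to an absolute address; since the BOOK3S_INTERRUPT_*
	 * values are the architected vector offsets (e.g. 0x500 for external),
	 * this jumps straight to the host's first-level interrupt handler.
	 * Otherwise the rfi[d] below returns to the highmem handler whose
	 * address was loaded into SRR0.
	 */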
366 | ||
07372794 AG |
367 | RFI |
368 | kvmppc_handler_trampoline_exit_end: |