/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
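	/*
	 * Note: MSR_RI is cleared first because SRR0/SRR1 are live from
	 * here until the RFI; the RFI then loads an MSR with IR and DR
	 * clear, so we arrive at kvmppc_call_hv_entry in real mode.
	 */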
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f	/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f	/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0xe80

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
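	/* (the lwarx/stwcx. loop retries until our bit is set atomically) */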
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, 112-4(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, 112-4(r1)
	b	kvmhv_switch_to_host
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

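	/* r3 >= 0: a wake reason the host must handle; r3 < 0: guest wakeup */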
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
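	/* 0x7fffffff is the maximum positive HDEC value, so no HDEC interrupt */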
	lis	r6, 0x7fff
	ori	r6, r6, 0xffff
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler.  By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
	/*
	 * We jump to pnv_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.
	 */
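	/* keep only PECE0 (external-interrupt wakeup) set in LPCR and
	 * clear PECE1, so (per the LPCR PECE bit definitions) the
	 * decrementer won't wake us */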
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before napping.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

/* Stack frame offsets */
#define STACK_SLOT_TID		(112-16)
#define STACK_SLOT_PSSCR	(112-24)

	.global	kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
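	/* Switching through the reserved LPID (which has no valid
	 * translations) lets SDR1 be changed safely before the guest's
	 * real LPID is installed below */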
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
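	/* (each tlbiel with IS=0b10 invalidates one congruence class;
	 * stepping r7 by 0x1000 walks all KVM_TLB_SETS sets) */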
	lwz	r6,KVM_TLB_SETS(r9)
	li	r0,0			/* RS for P9 version of tlbiel */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
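	/* (mtspr TBU40 writes only the upper 40 bits of the TB; if the
	 * lower 24 bits wrap during the update, the upper bits must be
	 * incremented by one, hence the overflow check below) */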
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

	/*
	 * Required state:
	 * R4 = vcpu
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 * R13 = PACA
	 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

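	/* hrfid loads the PC from HSRR0 and the MSR from HSRR1, entering
	 * the guest; the branch-to-self below should never be reached */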
	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
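	/* (in the trap encoding used here, vectors delivered via HSRR0/1
	 * have bit 1 set in r12; it is stripped again below) */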
	andi.	r0, r12, 2	/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt; first check for host_ipi.  If this is
	 * set, we know the host wants us out, so let's do it now.
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, check the return values <= 0.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
1577 | 15: lwz r3,VCORE_ENTRY_EXIT(r5) | |
b4deba5c | 1578 | rlwinm r0,r3,32-8,0xff |
371fefd6 PM |
1579 | clrldi r3,r3,56 |
1580 | cmpw r3,r0 | |
1581 | bne 15b | |
1582 | isync | |
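	/*
	 * The wait loop above keeps re-reading the packed entry/exit word
	 * until every thread that entered the guest has also exited;
	 * roughly, as a C sketch:
	 *
	 *	u32 ee;
	 *	do {
	 *		ee = vc->entry_exit_map;
	 *	} while ((ee & 0xff) != ((ee >> 8) & 0xff));
	 */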
1583 | ||
b4deba5c PM |
1584 | /* Did we actually switch to the guest at all? */ |
1585 | lbz r6, VCORE_IN_GUEST(r5) | |
1586 | cmpwi r6, 0 | |
1587 | beq 19f | |
1588 | ||
371fefd6 | 1589 | /* Primary thread switches back to host partition */ |
de56a948 | 1590 | lwz r7,KVM_HOST_LPID(r4) |
7a84084c PM |
1591 | BEGIN_FTR_SECTION |
1592 | ld r6,KVM_HOST_SDR1(r4) | |
de56a948 PM |
1593 | li r8,LPID_RSVD /* switch to reserved LPID */ |
1594 | mtspr SPRN_LPID,r8 | |
1595 | ptesync | |
7a84084c PM |
1596 | mtspr SPRN_SDR1,r6 /* switch to host page table */ |
1597 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | |
de56a948 PM |
1598 | mtspr SPRN_LPID,r7 |
1599 | isync | |
93b0f4dc | 1600 | |
b005255e | 1601 | BEGIN_FTR_SECTION |
88b02cf9 | 1602 | /* DPDES and VTB are shared between threads */ |
b005255e | 1603 | mfspr r7, SPRN_DPDES |
88b02cf9 | 1604 | mfspr r8, SPRN_VTB |
b005255e | 1605 | std r7, VCORE_DPDES(r5) |
88b02cf9 | 1606 | std r8, VCORE_VTB(r5) |
b005255e MN |
1607 | /* clear DPDES so we don't get guest doorbells in the host */ |
1608 | li r8, 0 | |
1609 | mtspr SPRN_DPDES, r8 | |
1610 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
1611 | ||
fd7bacbc MS |
1612 | /* If HMI, call kvmppc_realmode_hmi_handler() */ |
1613 | cmpwi r12, BOOK3S_INTERRUPT_HMI | |
1614 | bne 27f | |
1615 | bl kvmppc_realmode_hmi_handler | |
1616 | nop | |
1617 | li r12, BOOK3S_INTERRUPT_HMI | |
1618 | /* | |
1619 | * At this point kvmppc_realmode_hmi_handler has already resynced | |
1620 | * the TB, so there is no need to subtract the guest timebase | |
1621 | * offset from the timebase here; skip it. | |
1622 | * | |
1623 | * Also, do not call kvmppc_subcore_exit_guest() because it has | |
1624 | * been invoked as part of kvmppc_realmode_hmi_handler(). | |
1625 | */ | |
1626 | b 30f | |
1627 | ||
1628 | 27: | |
93b0f4dc PM |
1629 | /* Subtract timebase offset from timebase */ |
1630 | ld r8,VCORE_TB_OFFSET(r5) | |
1631 | cmpdi r8,0 | |
1632 | beq 17f | |
c5fb80d3 | 1633 | mftb r6 /* current guest timebase */ |
93b0f4dc PM |
1634 | subf r8,r8,r6 |
1635 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | |
1636 | mftb r7 /* check if lower 24 bits overflowed */ | |
1637 | clrldi r6,r6,40 | |
1638 | clrldi r7,r7,40 | |
1639 | cmpld r7,r6 | |
1640 | bge 17f | |
1641 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ | |
1642 | mtspr SPRN_TBU40,r8 | |
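	/*
	 * The TBU40 sequence above, as a C sketch (mttbu40() is a
	 * stand-in for the mtspr, not a real kernel helper):
	 *
	 *	u64 tb = mftb();		// guest timebase
	 *	u64 new_tb = tb - vc->tb_offset;
	 *	mttbu40(new_tb);		// writes the upper 40 bits only
	 *	if ((mftb() & 0xffffff) < (tb & 0xffffff))
	 *		mttbu40(new_tb + (1ul << 24));	// low 24 bits wrapped
	 */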
1643 | ||
fd7bacbc MS |
1644 | 17: bl kvmppc_subcore_exit_guest |
1645 | nop | |
1646 | 30: ld r5,HSTATE_KVM_VCORE(r13) | |
1647 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ | |
1648 | ||
388cc6e1 | 1649 | /* Reset PCR */ |
fd7bacbc | 1650 | ld r0, VCORE_PCR(r5) |
388cc6e1 PM |
1651 | cmpdi r0, 0 |
1652 | beq 18f | |
1653 | li r0, 0 | |
1654 | mtspr SPRN_PCR, r0 | |
1655 | 18: | |
93b0f4dc | 1656 | /* Signal secondary CPUs to continue */ |
371fefd6 | 1657 | stb r0,VCORE_IN_GUEST(r5) |
b4deba5c | 1658 | 19: lis r8,0x7fff /* MAX_INT@h */ |
de56a948 PM |
1659 | mtspr SPRN_HDEC,r8 |
1660 | ||
371fefd6 | 1661 | 16: ld r8,KVM_HOST_LPCR(r4) |
de56a948 PM |
1662 | mtspr SPRN_LPCR,r8 |
1663 | isync | |
1664 | ||
1665 | /* load host SLB entries */ | |
c17b98cf | 1666 | ld r8,PACA_SLBSHADOWPTR(r13) |
de56a948 PM |
1667 | |
1668 | .rept SLB_NUM_BOLTED | |
0865a583 AG |
1669 | li r3, SLBSHADOW_SAVEAREA |
1670 | LDX_BE r5, r8, r3 | |
1671 | addi r3, r3, 8 | |
1672 | LDX_BE r6, r8, r3 | |
de56a948 PM |
1673 | andis. r7,r5,SLB_ESID_V@h |
1674 | beq 1f | |
1675 | slbmte r6,r5 | |
1676 | 1: addi r8,r8,16 | |
1677 | .endr | |
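	/*
	 * The .rept loop above reloads the bolted host SLB entries; a C
	 * sketch, with slbmte() standing in for the instruction:
	 *
	 *	struct slb_shadow *p = local_paca->slb_shadow_ptr;
	 *	for (int i = 0; i < SLB_NUM_BOLTED; i++) {
	 *		u64 esid = be64_to_cpu(p->save_area[i].esid);
	 *		u64 vsid = be64_to_cpu(p->save_area[i].vsid);
	 *		if (esid & SLB_ESID_V)
	 *			slbmte(vsid, esid);
	 *	}
	 */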
1678 | ||
b6c295df PM |
1679 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
1680 | /* Finish timing, if we have a vcpu */ | |
1681 | ld r4, HSTATE_KVM_VCPU(r13) | |
1682 | cmpdi r4, 0 | |
1683 | li r3, 0 | |
1684 | beq 2f | |
1685 | bl kvmhv_accumulate_time | |
1686 | 2: | |
1687 | #endif | |
44a3add8 PM |
1688 | /* Unset guest mode */ |
1689 | li r0, KVM_GUEST_MODE_NONE | |
1690 | stb r0, HSTATE_IN_GUEST(r13) | |
1691 | ||
218309b7 PM |
1692 | ld r0, 112+PPC_LR_STKOFF(r1) |
1693 | addi r1, r1, 112 | |
1694 | mtlr r0 | |
1695 | blr | |
b4072df4 | 1696 | |
697d3899 PM |
1697 | /* |
1698 | * Check whether an HDSI is an HPTE not found fault or something else. | |
1699 | * If it is an HPTE not found fault that is due to the guest accessing | |
1700 | * a page that it has mapped but which we have paged out, then | |
1701 | * we continue on with the guest exit path. In all other cases, | |
1702 | * reflect the HDSI to the guest as a DSI. | |
1703 | */ | |
1704 | kvmppc_hdsi: | |
1705 | mfspr r4, SPRN_HDAR | |
1706 | mfspr r6, SPRN_HDSISR | |
4cf302bc PM |
1707 | /* HPTE not found fault or protection fault? */ |
1708 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h | |
697d3899 PM |
1709 | beq 1f /* if not, send it to the guest */ |
1710 | andi. r0, r11, MSR_DR /* data relocation enabled? */ | |
1711 | beq 3f | |
1712 | clrrdi r0, r4, 28 | |
c75df6f9 | 1713 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
cf29b215 PM |
1714 | li r0, BOOK3S_INTERRUPT_DATA_SEGMENT |
1715 | bne 7f /* if no SLB entry found */ | |
697d3899 PM |
1716 | 4: std r4, VCPU_FAULT_DAR(r9) |
1717 | stw r6, VCPU_FAULT_DSISR(r9) | |
1718 | ||
1719 | /* Search the hash table. */ | |
1720 | mr r3, r9 /* vcpu pointer */ | |
342d3db7 | 1721 | li r7, 1 /* data fault */ |
b1576fec | 1722 | bl kvmppc_hpte_hv_fault |
697d3899 PM |
1723 | ld r9, HSTATE_KVM_VCPU(r13) |
1724 | ld r10, VCPU_PC(r9) | |
1725 | ld r11, VCPU_MSR(r9) | |
1726 | li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE | |
1727 | cmpdi r3, 0 /* retry the instruction */ | |
1728 | beq 6f | |
1729 | cmpdi r3, -1 /* handle in kernel mode */ | |
b4072df4 | 1730 | beq guest_exit_cont |
697d3899 PM |
1731 | cmpdi r3, -2 /* MMIO emulation; need instr word */ |
1732 | beq 2f | |
1733 | ||
cf29b215 | 1734 | /* Synthesize a DSI (or DSegI) for the guest */ |
697d3899 PM |
1735 | ld r4, VCPU_FAULT_DAR(r9) |
1736 | mr r6, r3 | |
cf29b215 | 1737 | 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE |
697d3899 | 1738 | mtspr SPRN_DSISR, r6 |
cf29b215 | 1739 | 7: mtspr SPRN_DAR, r4 |
697d3899 PM |
1740 | mtspr SPRN_SRR0, r10 |
1741 | mtspr SPRN_SRR1, r11 | |
cf29b215 | 1742 | mr r10, r0 |
e4e38121 | 1743 | bl kvmppc_msr_interrupt |
b4072df4 | 1744 | fast_interrupt_c_return: |
697d3899 | 1745 | 6: ld r7, VCPU_CTR(r9) |
c63517c2 | 1746 | ld r8, VCPU_XER(r9) |
697d3899 PM |
1747 | mtctr r7 |
1748 | mtxer r8 | |
1749 | mr r4, r9 | |
1750 | b fast_guest_return | |
1751 | ||
1752 | 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ | |
1753 | ld r5, KVM_VRMA_SLB_V(r5) | |
1754 | b 4b | |
1755 | ||
1756 | /* If this is for emulated MMIO, load the instruction word */ | |
1757 | 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ | |
1758 | ||
1759 | /* Set guest mode to 'jump over instruction' so if lwz faults | |
1760 | * we'll just continue at the next IP. */ | |
1761 | li r0, KVM_GUEST_MODE_SKIP | |
1762 | stb r0, HSTATE_IN_GUEST(r13) | |
1763 | ||
1764 | /* Do the access with MSR:DR enabled */ | |
1765 | mfmsr r3 | |
1766 | ori r4, r3, MSR_DR /* Enable paging for data */ | |
1767 | mtmsrd r4 | |
1768 | lwz r8, 0(r10) | |
1769 | mtmsrd r3 | |
1770 | ||
1771 | /* Store the result */ | |
1772 | stw r8, VCPU_LAST_INST(r9) | |
1773 | ||
1774 | /* Unset guest mode. */ | |
44a3add8 | 1775 | li r0, KVM_GUEST_MODE_HOST_HV |
697d3899 | 1776 | stb r0, HSTATE_IN_GUEST(r13) |
b4072df4 | 1777 | b guest_exit_cont |
de56a948 | 1778 | |
342d3db7 PM |
1779 | /* |
1780 | * Similarly for an HISI, reflect it to the guest as an ISI unless | |
1781 | * it is an HPTE not found fault for a page that we have paged out. | |
1782 | */ | |
1783 | kvmppc_hisi: | |
1784 | andis. r0, r11, SRR1_ISI_NOPT@h | |
1785 | beq 1f | |
1786 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ | |
1787 | beq 3f | |
1788 | clrrdi r0, r10, 28 | |
c75df6f9 | 1789 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
cf29b215 PM |
1790 | li r0, BOOK3S_INTERRUPT_INST_SEGMENT |
1791 | bne 7f /* if no SLB entry found */ | |
342d3db7 PM |
1792 | 4: |
1793 | /* Search the hash table. */ | |
1794 | mr r3, r9 /* vcpu pointer */ | |
1795 | mr r4, r10 | |
1796 | mr r6, r11 | |
1797 | li r7, 0 /* instruction fault */ | |
b1576fec | 1798 | bl kvmppc_hpte_hv_fault |
342d3db7 PM |
1799 | ld r9, HSTATE_KVM_VCPU(r13) |
1800 | ld r10, VCPU_PC(r9) | |
1801 | ld r11, VCPU_MSR(r9) | |
1802 | li r12, BOOK3S_INTERRUPT_H_INST_STORAGE | |
1803 | cmpdi r3, 0 /* retry the instruction */ | |
b4072df4 | 1804 | beq fast_interrupt_c_return |
342d3db7 | 1805 | cmpdi r3, -1 /* handle in kernel mode */ |
b4072df4 | 1806 | beq guest_exit_cont |
342d3db7 | 1807 | |
cf29b215 | 1808 | /* Synthesize an ISI (or ISegI) for the guest */ |
342d3db7 | 1809 | mr r11, r3 |
cf29b215 PM |
1810 | 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE |
1811 | 7: mtspr SPRN_SRR0, r10 | |
342d3db7 | 1812 | mtspr SPRN_SRR1, r11 |
cf29b215 | 1813 | mr r10, r0 |
e4e38121 | 1814 | bl kvmppc_msr_interrupt |
b4072df4 | 1815 | b fast_interrupt_c_return |
342d3db7 PM |
1816 | |
1817 | 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ | |
1818 | ld r5, KVM_VRMA_SLB_V(r6) | |
1819 | b 4b | |
1820 | ||
a8606e20 PM |
1821 | /* |
1822 | * Try to handle an hcall in real mode. | |
1823 | * Returns to the guest if we handle it, or continues on up to | |
1824 | * the kernel if we can't (i.e. if we don't have a handler for | |
1825 | * it, or if the handler returns H_TOO_HARD). | |
1f09c3ed PM |
1826 | * |
1827 | * r5 - r8 contain hcall args, | |
1828 | * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca | |
a8606e20 | 1829 | */ |
a8606e20 | 1830 | hcall_try_real_mode: |
c75df6f9 | 1831 | ld r3,VCPU_GPR(R3)(r9) |
a8606e20 | 1832 | andi. r0,r11,MSR_PR |
27025a60 LPF |
1833 | /* sc 1 from userspace - reflect to guest syscall */ |
1834 | bne sc_1_fast_return | |
a8606e20 PM |
1835 | clrrdi r3,r3,2 |
1836 | cmpldi r3,hcall_real_table_end - hcall_real_table | |
b4072df4 | 1837 | bge guest_exit_cont |
699a0ea0 PM |
1838 | /* See if this hcall is enabled for in-kernel handling */ |
1839 | ld r4, VCPU_KVM(r9) | |
1840 | srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ | |
1841 | sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ | |
1842 | add r4, r4, r0 | |
1843 | ld r0, KVM_ENABLED_HCALLS(r4) | |
1844 | rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ | |
1845 | srd r0, r0, r4 | |
1846 | andi. r0, r0, 1 | |
1847 | beq guest_exit_cont | |
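	/*
	 * The enabled-hcalls test above, in C (hcall numbers are
	 * multiples of 4, one bit per hcall in the bitmap):
	 *
	 *	bool enabled(struct kvm *kvm, unsigned long req)
	 *	{
	 *		unsigned long nr = req / 4;
	 *		return (kvm->arch.enabled_hcalls[nr / 64] >> (nr & 63)) & 1;
	 *	}
	 */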
1848 | /* Get pointer to handler, if any, and call it */ | |
a8606e20 | 1849 | LOAD_REG_ADDR(r4, hcall_real_table) |
4baa1d87 | 1850 | lwax r3,r3,r4 |
a8606e20 | 1851 | cmpwi r3,0 |
b4072df4 | 1852 | beq guest_exit_cont |
05a308c7 AB |
1853 | add r12,r3,r4 |
1854 | mtctr r12 | |
a8606e20 | 1855 | mr r3,r9 /* get vcpu pointer */ |
c75df6f9 | 1856 | ld r4,VCPU_GPR(R4)(r9) |
a8606e20 PM |
1857 | bctrl |
1858 | cmpdi r3,H_TOO_HARD | |
1859 | beq hcall_real_fallback | |
1860 | ld r4,HSTATE_KVM_VCPU(r13) | |
c75df6f9 | 1861 | std r3,VCPU_GPR(R3)(r4) |
a8606e20 PM |
1862 | ld r10,VCPU_PC(r4) |
1863 | ld r11,VCPU_MSR(r4) | |
1864 | b fast_guest_return | |
1865 | ||
27025a60 LPF |
1866 | sc_1_fast_return: |
1867 | mtspr SPRN_SRR0,r10 | |
1868 | mtspr SPRN_SRR1,r11 | |
1869 | li r10, BOOK3S_INTERRUPT_SYSCALL | |
e4e38121 | 1870 | bl kvmppc_msr_interrupt |
27025a60 LPF |
1871 | mr r4,r9 |
1872 | b fast_guest_return | |
1873 | ||
a8606e20 PM |
1874 | /* We've attempted a real mode hcall, but it has been punted back | |
1875 | * to userspace. We need to restore some clobbered volatiles | |
1876 | * before resuming the pass-it-to-qemu path */ | |
1877 | hcall_real_fallback: | |
1878 | li r12,BOOK3S_INTERRUPT_SYSCALL | |
1879 | ld r9, HSTATE_KVM_VCPU(r13) | |
a8606e20 | 1880 | |
b4072df4 | 1881 | b guest_exit_cont |
a8606e20 PM |
1882 | |
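/*
 * Each entry in hcall_real_table below is a 32-bit offset from the start
 * of the table to the real-mode handler for hcall number (index * 4), or
 * zero if that hcall has no real-mode handler. The lwax/mtctr/bctrl
 * dispatch above therefore amounts to this C sketch:
 *
 *	s32 off = hcall_real_table[req / 4];	// lwax sign-extends
 *	void *handler = off ? (void *)hcall_real_table + off : NULL;
 */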
1883 | .globl hcall_real_table | |
1884 | hcall_real_table: | |
1885 | .long 0 /* 0 - unused */ | |
c1fb0194 AB |
1886 | .long DOTSYM(kvmppc_h_remove) - hcall_real_table |
1887 | .long DOTSYM(kvmppc_h_enter) - hcall_real_table | |
1888 | .long DOTSYM(kvmppc_h_read) - hcall_real_table | |
cdeee518 PM |
1889 | .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table |
1890 | .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table | |
c1fb0194 AB |
1891 | .long DOTSYM(kvmppc_h_protect) - hcall_real_table |
1892 | .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table | |
31217db7 | 1893 | .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table |
a8606e20 | 1894 | .long 0 /* 0x24 - H_SET_SPRG0 */ |
c1fb0194 | 1895 | .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table |
a8606e20 PM |
1896 | .long 0 /* 0x2c */ |
1897 | .long 0 /* 0x30 */ | |
1898 | .long 0 /* 0x34 */ | |
1899 | .long 0 /* 0x38 */ | |
1900 | .long 0 /* 0x3c */ | |
1901 | .long 0 /* 0x40 */ | |
1902 | .long 0 /* 0x44 */ | |
1903 | .long 0 /* 0x48 */ | |
1904 | .long 0 /* 0x4c */ | |
1905 | .long 0 /* 0x50 */ | |
1906 | .long 0 /* 0x54 */ | |
1907 | .long 0 /* 0x58 */ | |
1908 | .long 0 /* 0x5c */ | |
1909 | .long 0 /* 0x60 */ | |
e7d26f28 | 1910 | #ifdef CONFIG_KVM_XICS |
c1fb0194 AB |
1911 | .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table |
1912 | .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table | |
1913 | .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table | |
e7d26f28 | 1914 | .long 0 /* 0x70 - H_IPOLL */ |
c1fb0194 | 1915 | .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table |
e7d26f28 BH |
1916 | #else |
1917 | .long 0 /* 0x64 - H_EOI */ | |
1918 | .long 0 /* 0x68 - H_CPPR */ | |
1919 | .long 0 /* 0x6c - H_IPI */ | |
1920 | .long 0 /* 0x70 - H_IPOLL */ | |
1921 | .long 0 /* 0x74 - H_XIRR */ | |
1922 | #endif | |
a8606e20 PM |
1923 | .long 0 /* 0x78 */ |
1924 | .long 0 /* 0x7c */ | |
1925 | .long 0 /* 0x80 */ | |
1926 | .long 0 /* 0x84 */ | |
1927 | .long 0 /* 0x88 */ | |
1928 | .long 0 /* 0x8c */ | |
1929 | .long 0 /* 0x90 */ | |
1930 | .long 0 /* 0x94 */ | |
1931 | .long 0 /* 0x98 */ | |
1932 | .long 0 /* 0x9c */ | |
1933 | .long 0 /* 0xa0 */ | |
1934 | .long 0 /* 0xa4 */ | |
1935 | .long 0 /* 0xa8 */ | |
1936 | .long 0 /* 0xac */ | |
1937 | .long 0 /* 0xb0 */ | |
1938 | .long 0 /* 0xb4 */ | |
1939 | .long 0 /* 0xb8 */ | |
1940 | .long 0 /* 0xbc */ | |
1941 | .long 0 /* 0xc0 */ | |
1942 | .long 0 /* 0xc4 */ | |
1943 | .long 0 /* 0xc8 */ | |
1944 | .long 0 /* 0xcc */ | |
1945 | .long 0 /* 0xd0 */ | |
1946 | .long 0 /* 0xd4 */ | |
1947 | .long 0 /* 0xd8 */ | |
1948 | .long 0 /* 0xdc */ | |
c1fb0194 | 1949 | .long DOTSYM(kvmppc_h_cede) - hcall_real_table |
90fd09f8 | 1950 | .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table |
a8606e20 PM |
1951 | .long 0 /* 0xe8 */ |
1952 | .long 0 /* 0xec */ | |
1953 | .long 0 /* 0xf0 */ | |
1954 | .long 0 /* 0xf4 */ | |
1955 | .long 0 /* 0xf8 */ | |
1956 | .long 0 /* 0xfc */ | |
1957 | .long 0 /* 0x100 */ | |
1958 | .long 0 /* 0x104 */ | |
1959 | .long 0 /* 0x108 */ | |
1960 | .long 0 /* 0x10c */ | |
1961 | .long 0 /* 0x110 */ | |
1962 | .long 0 /* 0x114 */ | |
1963 | .long 0 /* 0x118 */ | |
1964 | .long 0 /* 0x11c */ | |
1965 | .long 0 /* 0x120 */ | |
c1fb0194 | 1966 | .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table |
8563bf52 PM |
1967 | .long 0 /* 0x128 */ |
1968 | .long 0 /* 0x12c */ | |
1969 | .long 0 /* 0x130 */ | |
c1fb0194 | 1970 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table |
31217db7 | 1971 | .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table |
d3695aa4 | 1972 | .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table |
e928e9cb ME |
1973 | .long 0 /* 0x140 */ |
1974 | .long 0 /* 0x144 */ | |
1975 | .long 0 /* 0x148 */ | |
1976 | .long 0 /* 0x14c */ | |
1977 | .long 0 /* 0x150 */ | |
1978 | .long 0 /* 0x154 */ | |
1979 | .long 0 /* 0x158 */ | |
1980 | .long 0 /* 0x15c */ | |
1981 | .long 0 /* 0x160 */ | |
1982 | .long 0 /* 0x164 */ | |
1983 | .long 0 /* 0x168 */ | |
1984 | .long 0 /* 0x16c */ | |
1985 | .long 0 /* 0x170 */ | |
1986 | .long 0 /* 0x174 */ | |
1987 | .long 0 /* 0x178 */ | |
1988 | .long 0 /* 0x17c */ | |
1989 | .long 0 /* 0x180 */ | |
1990 | .long 0 /* 0x184 */ | |
1991 | .long 0 /* 0x188 */ | |
1992 | .long 0 /* 0x18c */ | |
1993 | .long 0 /* 0x190 */ | |
1994 | .long 0 /* 0x194 */ | |
1995 | .long 0 /* 0x198 */ | |
1996 | .long 0 /* 0x19c */ | |
1997 | .long 0 /* 0x1a0 */ | |
1998 | .long 0 /* 0x1a4 */ | |
1999 | .long 0 /* 0x1a8 */ | |
2000 | .long 0 /* 0x1ac */ | |
2001 | .long 0 /* 0x1b0 */ | |
2002 | .long 0 /* 0x1b4 */ | |
2003 | .long 0 /* 0x1b8 */ | |
2004 | .long 0 /* 0x1bc */ | |
2005 | .long 0 /* 0x1c0 */ | |
2006 | .long 0 /* 0x1c4 */ | |
2007 | .long 0 /* 0x1c8 */ | |
2008 | .long 0 /* 0x1cc */ | |
2009 | .long 0 /* 0x1d0 */ | |
2010 | .long 0 /* 0x1d4 */ | |
2011 | .long 0 /* 0x1d8 */ | |
2012 | .long 0 /* 0x1dc */ | |
2013 | .long 0 /* 0x1e0 */ | |
2014 | .long 0 /* 0x1e4 */ | |
2015 | .long 0 /* 0x1e8 */ | |
2016 | .long 0 /* 0x1ec */ | |
2017 | .long 0 /* 0x1f0 */ | |
2018 | .long 0 /* 0x1f4 */ | |
2019 | .long 0 /* 0x1f8 */ | |
2020 | .long 0 /* 0x1fc */ | |
2021 | .long 0 /* 0x200 */ | |
2022 | .long 0 /* 0x204 */ | |
2023 | .long 0 /* 0x208 */ | |
2024 | .long 0 /* 0x20c */ | |
2025 | .long 0 /* 0x210 */ | |
2026 | .long 0 /* 0x214 */ | |
2027 | .long 0 /* 0x218 */ | |
2028 | .long 0 /* 0x21c */ | |
2029 | .long 0 /* 0x220 */ | |
2030 | .long 0 /* 0x224 */ | |
2031 | .long 0 /* 0x228 */ | |
2032 | .long 0 /* 0x22c */ | |
2033 | .long 0 /* 0x230 */ | |
2034 | .long 0 /* 0x234 */ | |
2035 | .long 0 /* 0x238 */ | |
2036 | .long 0 /* 0x23c */ | |
2037 | .long 0 /* 0x240 */ | |
2038 | .long 0 /* 0x244 */ | |
2039 | .long 0 /* 0x248 */ | |
2040 | .long 0 /* 0x24c */ | |
2041 | .long 0 /* 0x250 */ | |
2042 | .long 0 /* 0x254 */ | |
2043 | .long 0 /* 0x258 */ | |
2044 | .long 0 /* 0x25c */ | |
2045 | .long 0 /* 0x260 */ | |
2046 | .long 0 /* 0x264 */ | |
2047 | .long 0 /* 0x268 */ | |
2048 | .long 0 /* 0x26c */ | |
2049 | .long 0 /* 0x270 */ | |
2050 | .long 0 /* 0x274 */ | |
2051 | .long 0 /* 0x278 */ | |
2052 | .long 0 /* 0x27c */ | |
2053 | .long 0 /* 0x280 */ | |
2054 | .long 0 /* 0x284 */ | |
2055 | .long 0 /* 0x288 */ | |
2056 | .long 0 /* 0x28c */ | |
2057 | .long 0 /* 0x290 */ | |
2058 | .long 0 /* 0x294 */ | |
2059 | .long 0 /* 0x298 */ | |
2060 | .long 0 /* 0x29c */ | |
2061 | .long 0 /* 0x2a0 */ | |
2062 | .long 0 /* 0x2a4 */ | |
2063 | .long 0 /* 0x2a8 */ | |
2064 | .long 0 /* 0x2ac */ | |
2065 | .long 0 /* 0x2b0 */ | |
2066 | .long 0 /* 0x2b4 */ | |
2067 | .long 0 /* 0x2b8 */ | |
2068 | .long 0 /* 0x2bc */ | |
2069 | .long 0 /* 0x2c0 */ | |
2070 | .long 0 /* 0x2c4 */ | |
2071 | .long 0 /* 0x2c8 */ | |
2072 | .long 0 /* 0x2cc */ | |
2073 | .long 0 /* 0x2d0 */ | |
2074 | .long 0 /* 0x2d4 */ | |
2075 | .long 0 /* 0x2d8 */ | |
2076 | .long 0 /* 0x2dc */ | |
2077 | .long 0 /* 0x2e0 */ | |
2078 | .long 0 /* 0x2e4 */ | |
2079 | .long 0 /* 0x2e8 */ | |
2080 | .long 0 /* 0x2ec */ | |
2081 | .long 0 /* 0x2f0 */ | |
2082 | .long 0 /* 0x2f4 */ | |
2083 | .long 0 /* 0x2f8 */ | |
2084 | .long 0 /* 0x2fc */ | |
2085 | .long DOTSYM(kvmppc_h_random) - hcall_real_table | |
ae2113a4 | 2086 | .globl hcall_real_table_end |
a8606e20 PM |
2087 | hcall_real_table_end: |
2088 | ||
8563bf52 PM |
2089 | _GLOBAL(kvmppc_h_set_xdabr) |
2090 | andi. r0, r5, DABRX_USER | DABRX_KERNEL | |
2091 | beq 6f | |
2092 | li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI | |
2093 | andc. r0, r5, r0 | |
2094 | beq 3f | |
2095 | 6: li r3, H_PARAMETER | |
2096 | blr | |
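	/*
	 * The two-step check above validates the DABRX argument; in C:
	 *
	 *	if (!(dabrx & (DABRX_USER | DABRX_KERNEL)) ||
	 *	    (dabrx & ~(DABRX_USER | DABRX_KERNEL | DABRX_BTI)))
	 *		return H_PARAMETER;
	 */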
2097 | ||
a8606e20 | 2098 | _GLOBAL(kvmppc_h_set_dabr) |
8563bf52 PM |
2099 | li r5, DABRX_USER | DABRX_KERNEL |
2100 | 3: | |
eee7ff9d MN |
2101 | BEGIN_FTR_SECTION |
2102 | b 2f | |
2103 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
a8606e20 | 2104 | std r4,VCPU_DABR(r3) |
8563bf52 PM |
2105 | stw r5, VCPU_DABRX(r3) |
2106 | mtspr SPRN_DABRX, r5 | |
8943633c PM |
2107 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
2108 | 1: mtspr SPRN_DABR,r4 | |
2109 | mfspr r5, SPRN_DABR | |
2110 | cmpd r4, r5 | |
2111 | bne 1b | |
2112 | isync | |
a8606e20 PM |
2113 | li r3,0 |
2114 | blr | |
2115 | ||
8563bf52 PM |
2116 | /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ |
2117 | 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW | |
760a7364 | 2118 | rlwimi r5, r4, 2, DAWRX_WT |
8563bf52 PM |
2119 | clrrdi r4, r4, 3 |
2120 | std r4, VCPU_DAWR(r3) | |
2121 | std r5, VCPU_DAWRX(r3) | |
2122 | mtspr SPRN_DAWR, r4 | |
2123 | mtspr SPRN_DAWRX, r5 | |
2124 | li r3, 0 | |
a8606e20 PM |
2125 | blr |
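	/*
	 * The rlwimi pair above maps DABR control bits onto the DAWRX
	 * layout; a C sketch using the asm/reg.h bit definitions:
	 *
	 *	dawrx |= (dabr & (DABR_DATA_READ | DABR_DATA_WRITE)) << 5;
	 *	// -> DAWRX_DR and DAWRX_DW
	 *	dawrx |= (dabr & DABR_TRANSLATION) << 2;	// -> DAWRX_WT
	 *	dawr = dabr & ~7ul;	// double-word align the address
	 */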
2126 | ||
1f09c3ed | 2127 | _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ |
19ccb76a PM |
2128 | ori r11,r11,MSR_EE |
2129 | std r11,VCPU_MSR(r3) | |
2130 | li r0,1 | |
2131 | stb r0,VCPU_CEDED(r3) | |
2132 | sync /* order setting ceded vs. testing prodded */ | |
2133 | lbz r5,VCPU_PRODDED(r3) | |
2134 | cmpwi r5,0 | |
04f995a5 | 2135 | bne kvm_cede_prodded |
6af27c84 PM |
2136 | li r12,0 /* set trap to 0 to say hcall is handled */ |
2137 | stw r12,VCPU_TRAP(r3) | |
19ccb76a | 2138 | li r0,H_SUCCESS |
c75df6f9 | 2139 | std r0,VCPU_GPR(R3)(r3) |
19ccb76a PM |
2140 | |
2141 | /* | |
2142 | * Set our bit in the bitmask of napping threads unless all the | |
2143 | * other threads are already napping, in which case we send this | |
2144 | * up to the host. | |
2145 | */ | |
2146 | ld r5,HSTATE_KVM_VCORE(r13) | |
e0b7ec05 | 2147 | lbz r6,HSTATE_PTID(r13) |
19ccb76a PM |
2148 | lwz r8,VCORE_ENTRY_EXIT(r5) |
2149 | clrldi r8,r8,56 | |
2150 | li r0,1 | |
2151 | sld r0,r0,r6 | |
2152 | addi r6,r5,VCORE_NAPPING_THREADS | |
2153 | 31: lwarx r4,0,r6 | |
2154 | or r4,r4,r0 | |
7d6c40da PM |
2155 | cmpw r4,r8 |
2156 | beq kvm_cede_exit | |
19ccb76a PM |
2157 | stwcx. r4,0,r6 |
2158 | bne 31b | |
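	/*
	 * The lwarx/stwcx. loop above is, in effect, an atomic update
	 * (retried on contention), sketched in C:
	 *
	 *	u32 new = vc->napping_threads | (1u << ptid);
	 *	if (new == (vc->entry_exit_map & 0xff))
	 *		goto kvm_cede_exit;	// every thread is napping
	 *	vc->napping_threads = new;
	 */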
7d6c40da | 2159 | /* order napping_threads update vs testing entry_exit_map */ |
f019b7ad | 2160 | isync |
e0b7ec05 | 2161 | li r0,NAPPING_CEDE |
19ccb76a | 2162 | stb r0,HSTATE_NAPPING(r13) |
19ccb76a PM |
2163 | lwz r7,VCORE_ENTRY_EXIT(r5) |
2164 | cmpwi r7,0x100 | |
2165 | bge 33f /* another thread already exiting */ | |
2166 | ||
2167 | /* | |
2168 | * Although not specifically required by the architecture, POWER7 | |
2169 | * preserves the following registers in nap mode, even if an SMT mode | |
2170 | * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, | |
2171 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. | |
2172 | */ | |
2173 | /* Save non-volatile GPRs */ | |
c75df6f9 MN |
2174 | std r14, VCPU_GPR(R14)(r3) |
2175 | std r15, VCPU_GPR(R15)(r3) | |
2176 | std r16, VCPU_GPR(R16)(r3) | |
2177 | std r17, VCPU_GPR(R17)(r3) | |
2178 | std r18, VCPU_GPR(R18)(r3) | |
2179 | std r19, VCPU_GPR(R19)(r3) | |
2180 | std r20, VCPU_GPR(R20)(r3) | |
2181 | std r21, VCPU_GPR(R21)(r3) | |
2182 | std r22, VCPU_GPR(R22)(r3) | |
2183 | std r23, VCPU_GPR(R23)(r3) | |
2184 | std r24, VCPU_GPR(R24)(r3) | |
2185 | std r25, VCPU_GPR(R25)(r3) | |
2186 | std r26, VCPU_GPR(R26)(r3) | |
2187 | std r27, VCPU_GPR(R27)(r3) | |
2188 | std r28, VCPU_GPR(R28)(r3) | |
2189 | std r29, VCPU_GPR(R29)(r3) | |
2190 | std r30, VCPU_GPR(R30)(r3) | |
2191 | std r31, VCPU_GPR(R31)(r3) | |
19ccb76a PM |
2192 | |
2193 | /* save FP state */ | |
595e4f7e | 2194 | bl kvmppc_save_fp |
19ccb76a | 2195 | |
93d17397 PM |
2196 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
2197 | BEGIN_FTR_SECTION | |
2198 | ld r9, HSTATE_KVM_VCPU(r13) | |
2199 | bl kvmppc_save_tm | |
2200 | END_FTR_SECTION_IFSET(CPU_FTR_TM) | |
2201 | #endif | |
2202 | ||
fd6d53b1 PM |
2203 | /* |
2204 | * Set DEC to the smaller of DEC and HDEC, so that we wake | |
2205 | * no later than the end of our timeslice (HDEC interrupts | |
2206 | * don't wake us from nap). | |
2207 | */ | |
2208 | mfspr r3, SPRN_DEC | |
2209 | mfspr r4, SPRN_HDEC | |
2210 | mftb r5 | |
2211 | cmpw r3, r4 | |
2212 | ble 67f | |
2213 | mtspr SPRN_DEC, r4 | |
2214 | 67: | |
2215 | /* save expiry time of guest decrementer */ | |
2216 | extsw r3, r3 | |
2217 | add r3, r3, r5 | |
2218 | ld r4, HSTATE_KVM_VCPU(r13) | |
2219 | ld r5, HSTATE_KVM_VCORE(r13) | |
2220 | ld r6, VCORE_TB_OFFSET(r5) | |
2221 | subf r3, r6, r3 /* convert to host TB value */ | |
2222 | std r3, VCPU_DEC_EXPIRES(r4) | |
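	/*
	 * The expiry computation above, in C. Note that the expiry uses
	 * the original guest DEC, while the hardware DEC was clamped to
	 * HDEC purely so that the nap ends with our timeslice:
	 *
	 *	s64 dec = (s32)mfspr(SPRN_DEC);	// sign-extended
	 *	vcpu->arch.dec_expires = dec + mftb() - vc->tb_offset;
	 *					// stored in host TB units
	 */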
2223 | ||
b6c295df PM |
2224 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
2225 | ld r4, HSTATE_KVM_VCPU(r13) | |
2226 | addi r3, r4, VCPU_TB_CEDE | |
2227 | bl kvmhv_accumulate_time | |
2228 | #endif | |
2229 | ||
ccc07772 PM |
2230 | lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ |
2231 | ||
19ccb76a | 2232 | /* |
aa31e843 | 2233 | * Take a nap until a decrementer, external or doorbell interrupt | |
ccc07772 | 2234 | * occurs, with PECE1 and PECE0 set in LPCR. |
66feed61 | 2235 | * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. |
ccc07772 | 2236 | * Also clear the runlatch bit before napping. |
19ccb76a | 2237 | */ |
56548fc0 | 2238 | kvm_do_nap: |
1f09c3ed PM |
2239 | mfspr r0, SPRN_CTRLF |
2240 | clrrdi r0, r0, 1 | |
2241 | mtspr SPRN_CTRLT, r0 | |
582b910e | 2242 | |
f0888f70 PM |
2243 | li r0,1 |
2244 | stb r0,HSTATE_HWTHREAD_REQ(r13) | |
19ccb76a PM |
2245 | mfspr r5,SPRN_LPCR |
2246 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 | |
aa31e843 | 2247 | BEGIN_FTR_SECTION |
66feed61 | 2248 | ori r5, r5, LPCR_PECEDH |
ccc07772 | 2249 | rlwimi r5, r3, 0, LPCR_PECEDP |
aa31e843 | 2250 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
bf53c88e PM |
2251 | |
2252 | kvm_nap_sequence: /* desired LPCR value in r5 */ | |
2253 | BEGIN_FTR_SECTION | |
2254 | /* | |
2255 | * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset) | |
2256 | * enable state loss = 1 (allow SMT mode switch) | |
2257 | * requested level = 0 (just stop dispatching) | |
2258 | */ | |
2259 | lis r3, (PSSCR_EC | PSSCR_ESL)@h | |
2260 | mtspr SPRN_PSSCR, r3 | |
2261 | /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */ | |
2262 | li r4, LPCR_PECE_HVEE@higher | |
2263 | sldi r4, r4, 32 | |
2264 | or r5, r5, r4 | |
2265 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
19ccb76a PM |
2266 | mtspr SPRN_LPCR,r5 |
2267 | isync | |
2268 | li r0, 0 | |
2269 | std r0, HSTATE_SCRATCH0(r13) | |
2270 | ptesync | |
2271 | ld r0, HSTATE_SCRATCH0(r13) | |
2272 | 1: cmpd r0, r0 | |
2273 | bne 1b | |
bf53c88e | 2274 | BEGIN_FTR_SECTION |
19ccb76a | 2275 | nap |
bf53c88e PM |
2276 | FTR_SECTION_ELSE |
2277 | PPC_STOP | |
2278 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) | |
19ccb76a PM |
2279 | b . |
2280 | ||
e3bbbbfa PM |
2281 | 33: mr r4, r3 |
2282 | li r3, 0 | |
2283 | li r12, 0 | |
2284 | b 34f | |
2285 | ||
19ccb76a | 2286 | kvm_end_cede: |
4619ac88 PM |
2287 | /* get vcpu pointer */ |
2288 | ld r4, HSTATE_KVM_VCPU(r13) | |
2289 | ||
19ccb76a PM |
2290 | /* Woken by external or decrementer interrupt */ |
2291 | ld r1, HSTATE_HOST_R1(r13) | |
19ccb76a | 2292 | |
b6c295df PM |
2293 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
2294 | addi r3, r4, VCPU_TB_RMINTR | |
2295 | bl kvmhv_accumulate_time | |
2296 | #endif | |
2297 | ||
93d17397 PM |
2298 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
2299 | BEGIN_FTR_SECTION | |
2300 | bl kvmppc_restore_tm | |
2301 | END_FTR_SECTION_IFSET(CPU_FTR_TM) | |
2302 | #endif | |
2303 | ||
19ccb76a PM |
2304 | /* load up FP state */ |
2305 | bl kvmppc_load_fp | |
2306 | ||
fd6d53b1 PM |
2307 | /* Restore guest decrementer */ |
2308 | ld r3, VCPU_DEC_EXPIRES(r4) | |
2309 | ld r5, HSTATE_KVM_VCORE(r13) | |
2310 | ld r6, VCORE_TB_OFFSET(r5) | |
2311 | add r3, r3, r6 /* convert host TB to guest TB value */ | |
2312 | mftb r7 | |
2313 | subf r3, r7, r3 | |
2314 | mtspr SPRN_DEC, r3 | |
2315 | ||
19ccb76a | 2316 | /* Load NV GPRS */ |
c75df6f9 MN |
2317 | ld r14, VCPU_GPR(R14)(r4) |
2318 | ld r15, VCPU_GPR(R15)(r4) | |
2319 | ld r16, VCPU_GPR(R16)(r4) | |
2320 | ld r17, VCPU_GPR(R17)(r4) | |
2321 | ld r18, VCPU_GPR(R18)(r4) | |
2322 | ld r19, VCPU_GPR(R19)(r4) | |
2323 | ld r20, VCPU_GPR(R20)(r4) | |
2324 | ld r21, VCPU_GPR(R21)(r4) | |
2325 | ld r22, VCPU_GPR(R22)(r4) | |
2326 | ld r23, VCPU_GPR(R23)(r4) | |
2327 | ld r24, VCPU_GPR(R24)(r4) | |
2328 | ld r25, VCPU_GPR(R25)(r4) | |
2329 | ld r26, VCPU_GPR(R26)(r4) | |
2330 | ld r27, VCPU_GPR(R27)(r4) | |
2331 | ld r28, VCPU_GPR(R28)(r4) | |
2332 | ld r29, VCPU_GPR(R29)(r4) | |
2333 | ld r30, VCPU_GPR(R30)(r4) | |
2334 | ld r31, VCPU_GPR(R31)(r4) | |
37f55d30 | 2335 | |
e3bbbbfa PM |
2336 | /* Check the wake reason in SRR1 to see why we got here */ |
2337 | bl kvmppc_check_wake_reason | |
19ccb76a | 2338 | |
37f55d30 SW |
2339 | /* |
2340 | * Restore volatile registers since we could have called a | |
2341 | * C routine in kvmppc_check_wake_reason | |
2342 | * r4 = VCPU | |
2343 | * r3 tells us whether we need to return to host or not | |
2344 | * WARNING: it gets checked further down: | |
2345 | * should not modify r3 until this check is done. | |
2346 | */ | |
2347 | ld r4, HSTATE_KVM_VCPU(r13) | |
2348 | ||
19ccb76a | 2349 | /* clear our bit in vcore->napping_threads */ |
e3bbbbfa PM |
2350 | 34: ld r5,HSTATE_KVM_VCORE(r13) |
2351 | lbz r7,HSTATE_PTID(r13) | |
19ccb76a | 2352 | li r0,1 |
e3bbbbfa | 2353 | sld r0,r0,r7 |
19ccb76a PM |
2354 | addi r6,r5,VCORE_NAPPING_THREADS |
2355 | 32: lwarx r7,0,r6 | |
2356 | andc r7,r7,r0 | |
2357 | stwcx. r7,0,r6 | |
2358 | bne 32b | |
2359 | li r0,0 | |
2360 | stb r0,HSTATE_NAPPING(r13) | |
2361 | ||
37f55d30 | 2362 | /* See if the wake reason saved in r3 means we need to exit */ |
e3bbbbfa | 2363 | stw r12, VCPU_TRAP(r4) |
4619ac88 | 2364 | mr r9, r4 |
e3bbbbfa PM |
2365 | cmpdi r3, 0 |
2366 | bgt guest_exit_cont | |
4619ac88 | 2367 | |
19ccb76a PM |
2368 | /* see if any other thread is already exiting */ |
2369 | lwz r0,VCORE_ENTRY_EXIT(r5) | |
2370 | cmpwi r0,0x100 | |
e3bbbbfa | 2371 | bge guest_exit_cont |
19ccb76a | 2372 | |
e3bbbbfa | 2373 | b kvmppc_cede_reentry /* if not go back to guest */ |
19ccb76a PM |
2374 | |
2375 | /* cede when already previously prodded case */ | |
04f995a5 PM |
2376 | kvm_cede_prodded: |
2377 | li r0,0 | |
19ccb76a PM |
2378 | stb r0,VCPU_PRODDED(r3) |
2379 | sync /* order testing prodded vs. clearing ceded */ | |
2380 | stb r0,VCPU_CEDED(r3) | |
2381 | li r3,H_SUCCESS | |
2382 | blr | |
2383 | ||
2384 | /* we've ceded but we want to give control to the host */ | |
04f995a5 | 2385 | kvm_cede_exit: |
6af27c84 PM |
2386 | ld r9, HSTATE_KVM_VCPU(r13) |
2387 | b guest_exit_cont | |
19ccb76a | 2388 | |
b4072df4 PM |
2389 | /* Try to handle a machine check in real mode */ |
2390 | machine_check_realmode: | |
2391 | mr r3, r9 /* get vcpu pointer */ | |
b1576fec | 2392 | bl kvmppc_realmode_machine_check |
b4072df4 | 2393 | nop |
b4072df4 PM |
2394 | ld r9, HSTATE_KVM_VCPU(r13) |
2395 | li r12, BOOK3S_INTERRUPT_MACHINE_CHECK | |
74845bc2 MS |
2396 | /* |
2397 | * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through | |
2398 | * a machine check interrupt (set HSRR0 to 0x200). For handled | |
2399 | * (non-fatal) errors, just resume guest execution at the current | |
2400 | * HSRR0 instead of exiting the guest. This approach injects a | |
2401 | * machine check into the guest for fatal errors, causing the guest to crash. | |
2402 | * | |
2403 | * The old code returned to the host for unhandled errors, which | |
2404 | * caused the guest to hang with soft lockups inside the guest and | |
2405 | * made it difficult to recover the guest instance. | |
966d713e MS |
2406 | * |
2407 | * If we receive a machine check with MSR(RI=0), deliver it to the | |
2408 | * guest as a machine check, causing the guest to crash. | |
74845bc2 | 2409 | */ |
74845bc2 | 2410 | ld r11, VCPU_MSR(r9) |
1c9e3d51 PM |
2411 | rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */ |
2412 | bne mc_cont /* if so, exit to host */ | |
966d713e MS |
2413 | andi. r10, r11, MSR_RI /* check for unrecoverable exception */ |
2414 | beq 1f /* Deliver a machine check to guest */ | |
2415 | ld r10, VCPU_PC(r9) | |
2416 | cmpdi r3, 0 /* Did we handle MCE ? */ | |
74845bc2 | 2417 | bne 2f /* Continue guest execution. */ |
b4072df4 | 2418 | /* If not, deliver a machine check. SRR0/1 are already set */ |
966d713e | 2419 | 1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK |
e4e38121 | 2420 | bl kvmppc_msr_interrupt |
74845bc2 | 2421 | 2: b fast_interrupt_c_return |
b4072df4 | 2422 | |
e3bbbbfa PM |
2423 | /* |
2424 | * Check the reason we woke from nap, and take appropriate action. | |
1f09c3ed | 2425 | * Returns (in r3): |
e3bbbbfa PM |
2426 | * 0 if nothing needs to be done |
2427 | * 1 if something happened that needs to be handled by the host | |
66feed61 | 2428 | * -1 if there was a guest wakeup (IPI or msgsnd) |
e3c13e56 SW |
2429 | * -2 if we handled a PCI passthrough interrupt (returned by |
2430 | * kvmppc_read_intr only) | |
e3bbbbfa PM |
2431 | * |
2432 | * Also sets r12 to the interrupt vector for any interrupt that needs | |
2433 | * to be handled now by the host (0x500 for external interrupt), or zero. | |
37f55d30 SW |
2434 | * Modifies all volatile registers (since it may call a C function). |
2435 | * This routine calls kvmppc_read_intr, a C function, if an external | |
2436 | * interrupt is pending. | |
e3bbbbfa PM |
2437 | */ |
2438 | kvmppc_check_wake_reason: | |
2439 | mfspr r6, SPRN_SRR1 | |
aa31e843 PM |
2440 | BEGIN_FTR_SECTION |
2441 | rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ | |
2442 | FTR_SECTION_ELSE | |
2443 | rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ | |
2444 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) | |
2445 | cmpwi r6, 8 /* was it an external interrupt? */ | |
37f55d30 | 2446 | beq 7f /* if so, see what it was */ |
e3bbbbfa PM |
2447 | li r3, 0 |
2448 | li r12, 0 | |
2449 | cmpwi r6, 6 /* was it the decrementer? */ | |
2450 | beq 0f | |
aa31e843 PM |
2451 | BEGIN_FTR_SECTION |
2452 | cmpwi r6, 5 /* privileged doorbell? */ | |
2453 | beq 0f | |
5d00f66b PM |
2454 | cmpwi r6, 3 /* hypervisor doorbell? */ |
2455 | beq 3f | |
aa31e843 | 2456 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
fd7bacbc MS |
2457 | cmpwi r6, 0xa /* Hypervisor maintenance ? */ |
2458 | beq 4f | |
e3bbbbfa PM |
2459 | li r3, 1 /* anything else, return 1 */ |
2460 | 0: blr | |
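	/*
	 * The rlwinm above extracts the wake reason from SRR1 (a 4-bit
	 * field on POWER8, 3 bits on POWER7); in C, the decode is roughly:
	 *
	 *	switch ((mfspr(SPRN_SRR1) >> 18) & 0xf) {
	 *	case 0x8: ...	// external interrupt -> kvmppc_read_intr()
	 *	case 0x6: ...	// decrementer: return 0
	 *	case 0x5: ...	// privileged doorbell (P8): return 0
	 *	case 0x3: ...	// hypervisor doorbell (P8)
	 *	case 0xa: ...	// hypervisor maintenance
	 *	default:  ...	// anything else: return 1
	 *	}
	 */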
2461 | ||
5d00f66b PM |
2462 | /* hypervisor doorbell */ |
2463 | 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL | |
70aa3961 GS |
2464 | |
2465 | /* | |
2466 | * Clear the doorbell as we will invoke the handler | |
2467 | * explicitly in the guest exit path. | |
2468 | */ | |
2469 | lis r6, (PPC_DBELL_SERVER << (63-36))@h | |
2470 | PPC_MSGCLR(6) | |
66feed61 | 2471 | /* see if it's a host IPI */ |
5d00f66b | 2472 | li r3, 1 |
66feed61 PM |
2473 | lbz r0, HSTATE_HOST_IPI(r13) |
2474 | cmpwi r0, 0 | |
2475 | bnelr | |
70aa3961 | 2476 | /* if not, return -1 */ |
66feed61 | 2477 | li r3, -1 |
5d00f66b PM |
2478 | blr |
2479 | ||
fd7bacbc MS |
2480 | /* Woken up due to Hypervisor maintenance interrupt */ |
2481 | 4: li r12, BOOK3S_INTERRUPT_HMI | |
2482 | li r3, 1 | |
2483 | blr | |
2484 | ||
37f55d30 SW |
2485 | /* external interrupt - create a stack frame so we can call C */ |
2486 | 7: mflr r0 | |
2487 | std r0, PPC_LR_STKOFF(r1) | |
2488 | stdu r1, -PPC_MIN_STKFRM(r1) | |
2489 | bl kvmppc_read_intr | |
2490 | nop | |
2491 | li r12, BOOK3S_INTERRUPT_EXTERNAL | |
f7af5209 SW |
2492 | cmpdi r3, 1 |
2493 | ble 1f | |
2494 | ||
2495 | /* | |
2496 | * A return code of 2 means a PCI passthrough interrupt, but | |
2497 | * we need to return to the host to complete handling the | |
2498 | * interrupt. The guest exit code expects the trap reason | |
2499 | * in r12. | |
2500 | */ | |
2501 | li r12, BOOK3S_INTERRUPT_HV_RM_HARD | |
2502 | 1: | |
37f55d30 SW |
2503 | ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) |
2504 | addi r1, r1, PPC_MIN_STKFRM | |
2505 | mtlr r0 | |
2506 | blr | |
371fefd6 | 2507 | |
de56a948 PM |
2508 | /* |
2509 | * Save away FP, VMX and VSX registers. | |
2510 | * r3 = vcpu pointer | |
595e4f7e PM |
2511 | * N.B. r30 and r31 are volatile across this function, |
2512 | * thus it is not callable from C. | |
a8606e20 | 2513 | */ |
595e4f7e PM |
2514 | kvmppc_save_fp: |
2515 | mflr r30 | |
2516 | mr r31,r3 | |
8943633c PM |
2517 | mfmsr r5 |
2518 | ori r8,r5,MSR_FP | |
de56a948 PM |
2519 | #ifdef CONFIG_ALTIVEC |
2520 | BEGIN_FTR_SECTION | |
2521 | oris r8,r8,MSR_VEC@h | |
2522 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
2523 | #endif | |
2524 | #ifdef CONFIG_VSX | |
2525 | BEGIN_FTR_SECTION | |
2526 | oris r8,r8,MSR_VSX@h | |
2527 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
2528 | #endif | |
2529 | mtmsrd r8 | |
595e4f7e | 2530 | addi r3,r3,VCPU_FPRS |
9bf163f8 | 2531 | bl store_fp_state |
de56a948 PM |
2532 | #ifdef CONFIG_ALTIVEC |
2533 | BEGIN_FTR_SECTION | |
595e4f7e | 2534 | addi r3,r31,VCPU_VRS |
9bf163f8 | 2535 | bl store_vr_state |
de56a948 PM |
2536 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2537 | #endif | |
2538 | mfspr r6,SPRN_VRSAVE | |
e724f080 | 2539 | stw r6,VCPU_VRSAVE(r31) |
595e4f7e | 2540 | mtlr r30 |
de56a948 PM |
2541 | blr |
2542 | ||
2543 | /* | |
2544 | * Load up FP, VMX and VSX registers | |
2545 | * r4 = vcpu pointer | |
595e4f7e PM |
2546 | * N.B. r30 and r31 are volatile across this function, |
2547 | * thus it is not callable from C. | |
de56a948 | 2548 | */ |
de56a948 | 2549 | kvmppc_load_fp: |
595e4f7e PM |
2550 | mflr r30 |
2551 | mr r31,r4 | |
de56a948 PM |
2552 | mfmsr r9 |
2553 | ori r8,r9,MSR_FP | |
2554 | #ifdef CONFIG_ALTIVEC | |
2555 | BEGIN_FTR_SECTION | |
2556 | oris r8,r8,MSR_VEC@h | |
2557 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
2558 | #endif | |
2559 | #ifdef CONFIG_VSX | |
2560 | BEGIN_FTR_SECTION | |
2561 | oris r8,r8,MSR_VSX@h | |
2562 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
2563 | #endif | |
2564 | mtmsrd r8 | |
595e4f7e | 2565 | addi r3,r4,VCPU_FPRS |
9bf163f8 | 2566 | bl load_fp_state |
de56a948 PM |
2567 | #ifdef CONFIG_ALTIVEC |
2568 | BEGIN_FTR_SECTION | |
595e4f7e | 2569 | addi r3,r31,VCPU_VRS |
9bf163f8 | 2570 | bl load_vr_state |
de56a948 PM |
2571 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2572 | #endif | |
e724f080 | 2573 | lwz r7,VCPU_VRSAVE(r31) |
de56a948 | 2574 | mtspr SPRN_VRSAVE,r7 |
595e4f7e PM |
2575 | mtlr r30 |
2576 | mr r4,r31 | |
de56a948 | 2577 | blr |
44a3add8 | 2578 | |
f024ee09 PM |
2579 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
2580 | /* | |
2581 | * Save transactional state and TM-related registers. | |
2582 | * Called with r9 pointing to the vcpu struct. | |
2583 | * This can modify all checkpointed registers, but | |
2584 | * restores r1, r2 and r9 (vcpu pointer) before exit. | |
2585 | */ | |
2586 | kvmppc_save_tm: | |
2587 | mflr r0 | |
2588 | std r0, PPC_LR_STKOFF(r1) | |
2589 | ||
2590 | /* Turn on TM. */ | |
2591 | mfmsr r8 | |
2592 | li r0, 1 | |
2593 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | |
2594 | mtmsrd r8 | |
2595 | ||
2596 | ld r5, VCPU_MSR(r9) | |
2597 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | |
2598 | beq 1f /* TM not active in guest. */ | |
2599 | ||
2600 | std r1, HSTATE_HOST_R1(r13) | |
2601 | li r3, TM_CAUSE_KVM_RESCHED | |
2602 | ||
2603 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | |
2604 | li r5, 0 | |
2605 | mtmsrd r5, 1 | |
2606 | ||
2607 | /* All GPRs are volatile at this point. */ | |
2608 | TRECLAIM(R3) | |
2609 | ||
2610 | /* Temporarily store r13 and r9 so we have some regs to play with */ | |
2611 | SET_SCRATCH0(r13) | |
2612 | GET_PACA(r13) | |
2613 | std r9, PACATMSCRATCH(r13) | |
2614 | ld r9, HSTATE_KVM_VCPU(r13) | |
2615 | ||
2616 | /* Get a few more GPRs free. */ | |
2617 | std r29, VCPU_GPRS_TM(29)(r9) | |
2618 | std r30, VCPU_GPRS_TM(30)(r9) | |
2619 | std r31, VCPU_GPRS_TM(31)(r9) | |
2620 | ||
2621 | /* Save away PPR and DSCR soon so we don't run with user values. */ | |
2622 | mfspr r31, SPRN_PPR | |
2623 | HMT_MEDIUM | |
2624 | mfspr r30, SPRN_DSCR | |
2625 | ld r29, HSTATE_DSCR(r13) | |
2626 | mtspr SPRN_DSCR, r29 | |
2627 | ||
2628 | /* Save all but r9, r13 & r29-r31 */ | |
2629 | reg = 0 | |
2630 | .rept 29 | |
2631 | .if (reg != 9) && (reg != 13) | |
2632 | std reg, VCPU_GPRS_TM(reg)(r9) | |
2633 | .endif | |
2634 | reg = reg + 1 | |
2635 | .endr | |
2636 | /* ... now save r13 */ | |
2637 | GET_SCRATCH0(r4) | |
2638 | std r4, VCPU_GPRS_TM(13)(r9) | |
2639 | /* ... and save r9 */ | |
2640 | ld r4, PACATMSCRATCH(r13) | |
2641 | std r4, VCPU_GPRS_TM(9)(r9) | |
2642 | ||
2643 | /* Reload stack pointer and TOC. */ | |
2644 | ld r1, HSTATE_HOST_R1(r13) | |
2645 | ld r2, PACATOC(r13) | |
2646 | ||
2647 | /* Set MSR RI now we have r1 and r13 back. */ | |
2648 | li r5, MSR_RI | |
2649 | mtmsrd r5, 1 | |
2650 | ||
2651 | /* Save away checkpointed SPRs. */ | |
2652 | std r31, VCPU_PPR_TM(r9) | |
2653 | std r30, VCPU_DSCR_TM(r9) | |
2654 | mflr r5 | |
2655 | mfcr r6 | |
2656 | mfctr r7 | |
2657 | mfspr r8, SPRN_AMR | |
2658 | mfspr r10, SPRN_TAR | |
0d808df0 | 2659 | mfxer r11 |
f024ee09 PM |
2660 | std r5, VCPU_LR_TM(r9) |
2661 | stw r6, VCPU_CR_TM(r9) | |
2662 | std r7, VCPU_CTR_TM(r9) | |
2663 | std r8, VCPU_AMR_TM(r9) | |
2664 | std r10, VCPU_TAR_TM(r9) | |
0d808df0 | 2665 | std r11, VCPU_XER_TM(r9) |
f024ee09 PM |
2666 | |
2667 | /* Restore r12 as trap number. */ | |
2668 | lwz r12, VCPU_TRAP(r9) | |
2669 | ||
2670 | /* Save FP/VSX. */ | |
2671 | addi r3, r9, VCPU_FPRS_TM | |
2672 | bl store_fp_state | |
2673 | addi r3, r9, VCPU_VRS_TM | |
2674 | bl store_vr_state | |
2675 | mfspr r6, SPRN_VRSAVE | |
2676 | stw r6, VCPU_VRSAVE_TM(r9) | |
2677 | 1: | |
2678 | /* | |
2679 | * We need to save these SPRs after the treclaim so that the software | |
2680 | * error code is recorded correctly in the TEXASR. Also the user may | |
2681 | * change these outside of a transaction, so they must always be | |
2682 | * context switched. | |
2683 | */ | |
2684 | mfspr r5, SPRN_TFHAR | |
2685 | mfspr r6, SPRN_TFIAR | |
2686 | mfspr r7, SPRN_TEXASR | |
2687 | std r5, VCPU_TFHAR(r9) | |
2688 | std r6, VCPU_TFIAR(r9) | |
2689 | std r7, VCPU_TEXASR(r9) | |
2690 | ||
2691 | ld r0, PPC_LR_STKOFF(r1) | |
2692 | mtlr r0 | |
2693 | blr | |
2694 | ||
2695 | /* | |
2696 | * Restore transactional state and TM-related registers. | |
2697 | * Called with r4 pointing to the vcpu struct. | |
2698 | * This potentially modifies all checkpointed registers. | |
2699 | * It restores r1, r2, r4 from the PACA. | |
2700 | */ | |
2701 | kvmppc_restore_tm: | |
2702 | mflr r0 | |
2703 | std r0, PPC_LR_STKOFF(r1) | |
2704 | ||
2705 | /* Turn on TM/FP/VSX/VMX so we can restore them. */ | |
2706 | mfmsr r5 | |
2707 | li r6, MSR_TM >> 32 | |
2708 | sldi r6, r6, 32 | |
2709 | or r5, r5, r6 | |
2710 | ori r5, r5, MSR_FP | |
2711 | oris r5, r5, (MSR_VEC | MSR_VSX)@h | |
2712 | mtmsrd r5 | |
2713 | ||
2714 | /* | |
2715 | * The user may change these outside of a transaction, so they must | |
2716 | * always be context switched. | |
2717 | */ | |
2718 | ld r5, VCPU_TFHAR(r4) | |
2719 | ld r6, VCPU_TFIAR(r4) | |
2720 | ld r7, VCPU_TEXASR(r4) | |
2721 | mtspr SPRN_TFHAR, r5 | |
2722 | mtspr SPRN_TFIAR, r6 | |
2723 | mtspr SPRN_TEXASR, r7 | |
2724 | ||
2725 | ld r5, VCPU_MSR(r4) | |
2726 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | |
2727 | beqlr /* TM not active in guest */ | |
2728 | std r1, HSTATE_HOST_R1(r13) | |
2729 | ||
2730 | /* Make sure the failure summary is set, otherwise we'll program check | |
2731 | * when we trechkpt. It's possible that this might not have been set | |
2732 | * on a kvmppc_set_one_reg() call but we shouldn't let this crash the | |
2733 | * host. | |
2734 | */ | |
2735 | oris r7, r7, (TEXASR_FS)@h | |
2736 | mtspr SPRN_TEXASR, r7 | |
2737 | ||
2738 | /* | |
2739 | * We need to load up the checkpointed state for the guest. | |
2740 | * We need to do this early as it will blow away any GPRs, VSRs and | |
2741 | * some SPRs. | |
2742 | */ | |
2743 | ||
2744 | mr r31, r4 | |
2745 | addi r3, r31, VCPU_FPRS_TM | |
2746 | bl load_fp_state | |
2747 | addi r3, r31, VCPU_VRS_TM | |
2748 | bl load_vr_state | |
2749 | mr r4, r31 | |
2750 | lwz r7, VCPU_VRSAVE_TM(r4) | |
2751 | mtspr SPRN_VRSAVE, r7 | |
2752 | ||
2753 | ld r5, VCPU_LR_TM(r4) | |
2754 | lwz r6, VCPU_CR_TM(r4) | |
2755 | ld r7, VCPU_CTR_TM(r4) | |
2756 | ld r8, VCPU_AMR_TM(r4) | |
2757 | ld r9, VCPU_TAR_TM(r4) | |
0d808df0 | 2758 | ld r10, VCPU_XER_TM(r4) |
f024ee09 PM |
2759 | mtlr r5 |
2760 | mtcr r6 | |
2761 | mtctr r7 | |
2762 | mtspr SPRN_AMR, r8 | |
2763 | mtspr SPRN_TAR, r9 | |
0d808df0 | 2764 | mtxer r10 |
f024ee09 PM |
2765 | |
2766 | /* | |
2767 | * Load up PPR and DSCR values but don't put them in the actual SPRs | |
2768 | * till the last moment to avoid running with userspace PPR and DSCR for | |
2769 | * too long. | |
2770 | */ | |
2771 | ld r29, VCPU_DSCR_TM(r4) | |
2772 | ld r30, VCPU_PPR_TM(r4) | |
2773 | ||
2774 | std r2, PACATMSCRATCH(r13) /* Save TOC */ | |
2775 | ||
2776 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | |
2777 | li r5, 0 | |
2778 | mtmsrd r5, 1 | |
2779 | ||
2780 | /* Load GPRs r0-r28 */ | |
2781 | reg = 0 | |
2782 | .rept 29 | |
2783 | ld reg, VCPU_GPRS_TM(reg)(r31) | |
2784 | reg = reg + 1 | |
2785 | .endr | |
2786 | ||
2787 | mtspr SPRN_DSCR, r29 | |
2788 | mtspr SPRN_PPR, r30 | |
2789 | ||
2790 | /* Load final GPRs */ | |
2791 | ld 29, VCPU_GPRS_TM(29)(r31) | |
2792 | ld 30, VCPU_GPRS_TM(30)(r31) | |
2793 | ld 31, VCPU_GPRS_TM(31)(r31) | |
2794 | ||
2795 | /* TM checkpointed state is now setup. All GPRs are now volatile. */ | |
2796 | TRECHKPT | |
2797 | ||
2798 | /* Now let's get back the state we need. */ | |
2799 | HMT_MEDIUM | |
2800 | GET_PACA(r13) | |
2801 | ld r29, HSTATE_DSCR(r13) | |
2802 | mtspr SPRN_DSCR, r29 | |
2803 | ld r4, HSTATE_KVM_VCPU(r13) | |
2804 | ld r1, HSTATE_HOST_R1(r13) | |
2805 | ld r2, PACATMSCRATCH(r13) | |
2806 | ||
2807 | /* Set the MSR RI since we have our registers back. */ | |
2808 | li r5, MSR_RI | |
2809 | mtmsrd r5, 1 | |
2810 | ||
2811 | ld r0, PPC_LR_STKOFF(r1) | |
2812 | mtlr r0 | |
2813 | blr | |
2814 | #endif | |
2815 | ||
44a3add8 PM |
2816 | /* |
2817 | * We come here if we get any exception or interrupt while we are | |
2818 | * executing host real mode code while in guest MMU context. | |
2819 | * For now just spin, but we should do something better. | |
2820 | */ | |
2821 | kvmppc_bad_host_intr: | |
2822 | b . | |
e4e38121 MN |
2823 | |
2824 | /* | |
2825 | * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken | |
2826 | * from VCPU_INTR_MSR and is modified based on the required TM state changes. | |
2827 | * r11 has the guest MSR value (in/out) | |
2828 | * r9 has a vcpu pointer (in) | |
2829 | * r0 is used as a scratch register | |
2830 | */ | |
2831 | kvmppc_msr_interrupt: | |
2832 | rldicl r0, r11, 64 - MSR_TS_S_LG, 62 | |
2833 | cmpwi r0, 2 /* Check if we are in transactional state.. */ | |
2834 | ld r11, VCPU_INTR_MSR(r9) | |
2835 | bne 1f | |
2836 | /* ... if transactional, change to suspended */ | |
2837 | li r0, 1 | |
2838 | 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG | |
2839 | blr | |
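	/*
	 * In C terms: the delivered MSR inherits the guest's transaction
	 * state, except that "transactional" becomes "suspended", since
	 * an interrupt suspends an active transaction:
	 *
	 *	unsigned long ts = (guest_msr >> MSR_TS_S_LG) & 3;
	 *	if (ts == 2)	// transactional
	 *		ts = 1;	// -> suspended
	 *	new_msr = (vcpu->arch.intr_msr & ~MSR_TS_MASK) |
	 *		  (ts << MSR_TS_S_LG);
	 */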
9bc01a9b PM |
2840 | |
2841 | /* | |
2842 | * This works around a hardware bug on POWER8E processors, where | |
2843 | * writing a 1 to the MMCR0[PMAO] bit doesn't generate a | |
2844 | * performance monitor interrupt. Instead, when we need to have | |
2845 | * an interrupt pending, we have to arrange for a counter to overflow. | |
2846 | */ | |
2847 | kvmppc_fix_pmao: | |
2848 | li r3, 0 | |
2849 | mtspr SPRN_MMCR2, r3 | |
2850 | lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h | |
2851 | ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN | |
2852 | mtspr SPRN_MMCR0, r3 | |
2853 | lis r3, 0x7fff | |
2854 | ori r3, r3, 0xffff | |
2855 | mtspr SPRN_PMC6, r3 | |
2856 | isync | |
2857 | blr | |
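	/*
	 * A C sketch of the workaround above:
	 *
	 *	mtspr(SPRN_MMCR2, 0);	// no freeze conditions
	 *	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE |
	 *			  MMCR0_PMCjCE | MMCR0_C56RUN);
	 *	mtspr(SPRN_PMC6, 0x7fffffff);	// one count from overflow,
	 *					// so a PMI fires immediately
	 */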
b6c295df PM |
2858 | |
2859 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING | |
2860 | /* | |
2861 | * Start timing an activity | |
2862 | * r3 = pointer to time accumulation struct, r4 = vcpu | |
2863 | */ | |
2864 | kvmhv_start_timing: | |
2865 | ld r5, HSTATE_KVM_VCORE(r13) | |
2866 | lbz r6, VCORE_IN_GUEST(r5) | |
2867 | cmpwi r6, 0 | |
2868 | beq 5f /* if in guest, need to */ | |
2869 | ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ | |
2870 | 5: mftb r5 | |
2871 | subf r5, r6, r5 | |
2872 | std r3, VCPU_CUR_ACTIVITY(r4) | |
2873 | std r5, VCPU_ACTIVITY_START(r4) | |
2874 | blr | |
2875 | ||
2876 | /* | |
2877 | * Accumulate time to one activity and start another. | |
2878 | * r3 = pointer to new time accumulation struct, r4 = vcpu | |
2879 | */ | |
2880 | kvmhv_accumulate_time: | |
2881 | ld r5, HSTATE_KVM_VCORE(r13) | |
2882 | lbz r8, VCORE_IN_GUEST(r5) | |
2883 | cmpwi r8, 0 | |
2884 | beq 4f /* if in guest, need to */ | |
2885 | ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */ | |
2886 | 4: ld r5, VCPU_CUR_ACTIVITY(r4) | |
2887 | ld r6, VCPU_ACTIVITY_START(r4) | |
2888 | std r3, VCPU_CUR_ACTIVITY(r4) | |
2889 | mftb r7 | |
2890 | subf r7, r8, r7 | |
2891 | std r7, VCPU_ACTIVITY_START(r4) | |
2892 | cmpdi r5, 0 | |
2893 | beqlr | |
2894 | subf r3, r6, r7 | |
2895 | ld r8, TAS_SEQCOUNT(r5) | |
2896 | cmpdi r8, 0 | |
2897 | addi r8, r8, 1 | |
2898 | std r8, TAS_SEQCOUNT(r5) | |
2899 | lwsync | |
2900 | ld r7, TAS_TOTAL(r5) | |
2901 | add r7, r7, r3 | |
2902 | std r7, TAS_TOTAL(r5) | |
2903 | ld r6, TAS_MIN(r5) | |
2904 | ld r7, TAS_MAX(r5) | |
2905 | beq 3f | |
2906 | cmpd r3, r6 | |
2907 | bge 1f | |
2908 | 3: std r3, TAS_MIN(r5) | |
2909 | 1: cmpd r3, r7 | |
2910 | ble 2f | |
2911 | std r3, TAS_MAX(r5) | |
2912 | 2: lwsync | |
2913 | addi r8, r8, 1 | |
2914 | std r8, TAS_SEQCOUNT(r5) | |
2915 | blr | |
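	/*
	 * The accounting above follows the usual seqcount pattern so that
	 * readers can detect a torn update; sketched in C (field names as
	 * in the kvmhv_tb_accumulator struct):
	 *
	 *	u64 now = mftb() - (in_guest ? vc->tb_offset : 0);
	 *	u64 delta = now - prev_start;
	 *	acc->seqcount++;		// odd while updating
	 *	smp_wmb();
	 *	acc->tb_total += delta;
	 *	if (first_sample || delta < acc->tb_min)
	 *		acc->tb_min = delta;
	 *	if (delta > acc->tb_max)
	 *		acc->tb_max = delta;
	 *	smp_wmb();
	 *	acc->seqcount++;		// even again
	 */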
2916 | #endif |