/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
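
/*
 * Conceptually, as a C sketch (cpu_has_feature() standing in for the
 * feature-section patching):
 *
 *	if (!cpu_has_feature(CPU_FTR_ARCH_300))
 *		hdec = (s64)(s32)hdec;
 *
 * Pre-v3.00 CPUs have a 32-bit HDEC, so the value mfspr returns must
 * be sign-extended before it is compared as a 64-bit quantity;
 * POWER9's large decrementer already yields a full-width value.
 */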
45 | ||
e0b7ec05 PM |
46 | /* Values in HSTATE_NAPPING(r13) */ |
47 | #define NAPPING_CEDE 1 | |
48 | #define NAPPING_NOVCPU 2 | |
10d91611 | 49 | #define NAPPING_UNSPLIT 3 |
e0b7ec05 | 50 | |
7ceaa6dc | 51 | /* Stack frame offsets for kvmppc_hv_entry */ |
95a6432c | 52 | #define SFS 208 |
7ceaa6dc | 53 | #define STACK_SLOT_TRAP (SFS-4) |
95a6432c | 54 | #define STACK_SLOT_SHORT_PATH (SFS-8) |
7ceaa6dc PM |
55 | #define STACK_SLOT_TID (SFS-16) |
56 | #define STACK_SLOT_PSSCR (SFS-24) | |
57 | #define STACK_SLOT_PID (SFS-32) | |
58 | #define STACK_SLOT_IAMR (SFS-40) | |
59 | #define STACK_SLOT_CIABR (SFS-48) | |
60 | #define STACK_SLOT_DAWR (SFS-56) | |
61 | #define STACK_SLOT_DAWRX (SFS-64) | |
769377f7 | 62 | #define STACK_SLOT_HFSCR (SFS-72) |
c3c7470c ME |
63 | #define STACK_SLOT_AMR (SFS-80) |
64 | #define STACK_SLOT_UAMOR (SFS-88) | |
95a6432c PM |
65 | /* the following is used by the P9 short path */ |
66 | #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ | |
7ceaa6dc | 67 | |
de56a948 | 68 | /* |
19ccb76a | 69 | * Call kvmppc_hv_entry in real mode. |
de56a948 PM |
70 | * Must be called with interrupts hard-disabled. |
71 | * | |
72 | * Input Registers: | |
73 | * | |
74 | * LR = return address to continue at after eventually re-enabling MMU | |
75 | */ | |
6ed179b6 | 76 | _GLOBAL_TOC(kvmppc_hv_entry_trampoline) |
218309b7 PM |
77 | mflr r0 |
78 | std r0, PPC_LR_STKOFF(r1) | |
79 | stdu r1, -112(r1) | |
de56a948 | 80 | mfmsr r10 |
8b24e69f | 81 | std r10, HSTATE_HOST_MSR(r13) |
218309b7 | 82 | LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) |
de56a948 PM |
83 | li r0,MSR_RI |
84 | andc r0,r10,r0 | |
85 | li r6,MSR_IR | MSR_DR | |
86 | andc r6,r10,r6 | |
87 | mtmsrd r0,1 /* clear RI in MSR */ | |
88 | mtsrr0 r5 | |
89 | mtsrr1 r6 | |
222f20f1 | 90 | RFI_TO_KERNEL |
de56a948 | 91 | |
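/*
 * What the trampoline above does, as a hedged C-like sketch:
 *
 *	mtmsrd(mfmsr() & ~MSR_RI);	- no recoverable-interrupt window
 *	srr0 = kvmppc_call_hv_entry;
 *	srr1 = mfmsr() & ~(MSR_IR | MSR_DR);	- real mode at the target
 *	rfid();
 *
 * RI is cleared first so that an interrupt taken in the window before
 * the rfid is treated as unrecoverable rather than corrupting SRR0/1.
 */
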
kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt.  We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code.  For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
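	/*
	 * The lwarx/stwcx. loop above is the usual load-reserve /
	 * store-conditional pattern; as a C sketch (__lwarx/__stwcx
	 * are stand-ins, not real kernel helpers):
	 *
	 *	do {
	 *		old = __lwarx(&vc->napping_threads);
	 *	} while (!__stwcx(&vc->napping_threads, old | (1 << ptid)));
	 *
	 * The store-conditional fails (hence the bne) if another thread
	 * modified the word since our lwarx.
	 */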
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when woken from the Linux offline idle code.
 * Relocation is off.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	ld	r4,PACAEMERGSP(r13)
	mfcr	r5
	mflr	r0
	std	r1,0(r4)
	std	r5,8(r4)
	std	r0,16(r4)
	subi	r1,r4,STACK_FRAME_OVERHEAD
	SAVE_NVGPRS(r1)

	/*
	 * Could avoid this and pass it through in r3.  For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	twnei	r0,0

	b	1f

kvm_unsplit_wakeup:
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler.  By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	/* set up r3 for return */
	mfspr	r3,SPRN_SRR1
	REST_NVGPRS(r1)
	addi	r1, r1, STACK_FRAME_OVERHEAD
	ld	r0, 16(r1)
	ld	r5, 8(r1)
	ld	r1, 0(r1)
	mtlr	r0
	mtcr	r5
	blr

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before we
	 * go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

	.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr	r3, r4
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it.  This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz	r5, 0(r5)
	cmpdi	r5, 0
	beq	1f
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
1:
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

6964e6a4 PM |
924 | /* For hash guest, clear out and reload the SLB */ |
925 | ld r6, VCPU_KVM(r4) | |
926 | lbz r0, KVM_RADIX(r6) | |
927 | cmpwi r0, 0 | |
928 | bne 9f | |
929 | li r6, 0 | |
930 | slbmte r6, r6 | |
931 | slbia | |
932 | ptesync | |
933 | ||
934 | /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */ | |
935 | lwz r5,VCPU_SLB_MAX(r4) | |
936 | cmpwi r5,0 | |
937 | beq 9f | |
938 | mtctr r5 | |
939 | addi r6,r4,VCPU_SLB | |
940 | 1: ld r8,VCPU_SLB_E(r6) | |
941 | ld r9,VCPU_SLB_V(r6) | |
942 | slbmte r9,r8 | |
943 | addi r6,r6,VCPU_SLB_SIZE | |
944 | bdnz 1b | |
945 | 9: | |
946 | ||
5af50993 BH |
947 | #ifdef CONFIG_KVM_XICS |
948 | /* We are entering the guest on that thread, push VCPU to XIVE */ | |
5af50993 BH |
949 | ld r11, VCPU_XIVE_SAVED_STATE(r4) |
950 | li r9, TM_QW1_OS | |
7ae9bda7 SJS |
951 | lwz r8, VCPU_XIVE_CAM_WORD(r4) |
952 | li r7, TM_QW1_OS + TM_WORD2 | |
953 | mfmsr r0 | |
954 | andi. r0, r0, MSR_DR /* in real mode? */ | |
955 | beq 2f | |
956 | ld r10, HSTATE_XIVE_TIMA_VIRT(r13) | |
957 | cmpldi cr1, r10, 0 | |
958 | beq cr1, no_xive | |
959 | eieio | |
960 | stdx r11,r9,r10 | |
961 | stwx r8,r7,r10 | |
962 | b 3f | |
963 | 2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) | |
964 | cmpldi cr1, r10, 0 | |
965 | beq cr1, no_xive | |
5af50993 | 966 | eieio |
ad98dd1a | 967 | stdcix r11,r9,r10 |
7ae9bda7 SJS |
968 | stwcix r8,r7,r10 |
969 | 3: li r9, 1 | |
35c2405e | 970 | stb r9, VCPU_XIVE_PUSHED(r4) |
ad98dd1a | 971 | eieio |
2267ea76 BH |
972 | |
973 | /* | |
974 | * We clear the irq_pending flag. There is a small chance of a | |
975 | * race vs. the escalation interrupt happening on another | |
976 | * processor setting it again, but the only consequence is to | |
977 | * cause a spurrious wakeup on the next H_CEDE which is not an | |
978 | * issue. | |
979 | */ | |
980 | li r0,0 | |
981 | stb r0, VCPU_IRQ_PENDING(r4) | |
9b9b13a6 BH |
982 | |
983 | /* | |
984 | * In single escalation mode, if the escalation interrupt is | |
985 | * on, we mask it. | |
986 | */ | |
987 | lbz r0, VCPU_XIVE_ESC_ON(r4) | |
7ae9bda7 SJS |
988 | cmpwi cr1, r0,0 |
989 | beq cr1, 1f | |
9b9b13a6 | 990 | li r9, XIVE_ESB_SET_PQ_01 |
7ae9bda7 SJS |
991 | beq 4f /* in real mode? */ |
992 | ld r10, VCPU_XIVE_ESC_VADDR(r4) | |
993 | ldx r0, r10, r9 | |
994 | b 5f | |
995 | 4: ld r10, VCPU_XIVE_ESC_RADDR(r4) | |
9b9b13a6 | 996 | ldcix r0, r10, r9 |
7ae9bda7 | 997 | 5: sync |
9b9b13a6 BH |
998 | |
999 | /* We have a possible subtle race here: The escalation interrupt might | |
1000 | * have fired and be on its way to the host queue while we mask it, | |
1001 | * and if we unmask it early enough (re-cede right away), there is | |
1002 | * a theorical possibility that it fires again, thus landing in the | |
1003 | * target queue more than once which is a big no-no. | |
1004 | * | |
1005 | * Fortunately, solving this is rather easy. If the above load setting | |
1006 | * PQ to 01 returns a previous value where P is set, then we know the | |
1007 | * escalation interrupt is somewhere on its way to the host. In that | |
1008 | * case we simply don't clear the xive_esc_on flag below. It will be | |
1009 | * eventually cleared by the handler for the escalation interrupt. | |
1010 | * | |
1011 | * Then, when doing a cede, we check that flag again before re-enabling | |
1012 | * the escalation interrupt, and if set, we abort the cede. | |
1013 | */ | |
1014 | andi. r0, r0, XIVE_ESB_VAL_P | |
1015 | bne- 1f | |
1016 | ||
1017 | /* Now P is 0, we can clear the flag */ | |
1018 | li r0, 0 | |
1019 | stb r0, VCPU_XIVE_ESC_ON(r4) | |
1020 | 1: | |
5af50993 BH |
1021 | no_xive: |
1022 | #endif /* CONFIG_KVM_XICS */ | |
1023 | ||
	li	r0, 0
	stw	r0, STACK_SLOT_SHORT_PATH(r1)

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
BEGIN_FTR_SECTION
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz	r3, VCPU_DBELL_REQ(r4)
	or	r0, r0, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpdi	r0, 0
	beq	71f
	mr	r3, r4
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
71:
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_entry_c:
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)
	mtctr	r6
	mtxer	r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	ld	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.

/*
 * Enter the guest on a P9 or later system where we have exactly
 * one vcpu per vcore and we don't need to go to real mode
 * (which implies that host and guest are both using radix MMU mode).
 * r3 = vcpu pointer
 * Most SPRs and all the VSRs have been loaded already.
 */
_GLOBAL(__kvmhv_vcpu_entry_p9)
EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	li	r0, 1
	stw	r0, STACK_SLOT_SHORT_PATH(r1)

	std	r3, HSTATE_KVM_VCPU(r13)
	mfcr	r4
	stw	r4, SFS+8(r1)

	std	r1, HSTATE_HOST_R1(r13)

	reg = 14
	.rept	18
	std	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	reg = 14
	.rept	18
	ld	reg, __VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr

	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)

	mr	r4, r3
	b	fast_guest_entry_c
guest_exit_short_path:

	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	reg = 14
	.rept	18
	std	reg, __VCPU_GPR(reg)(r9)
	reg = reg + 1
	.endr

	reg = 14
	.rept	18
	ld	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	lwz	r4, SFS+8(r1)
	mtcr	r4

	mr	r3, r12		/* trap number */

	addi	r1, r1, SFS
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0

	/* If we are in real mode, do a rfid to get back to the caller */
	mfmsr	r4
	andi.	r5, r4, MSR_IR
	bnelr
	rldicl	r5, r4, 64 - MSR_TS_S_LG, 62	/* extract TS field */
	mtspr	SPRN_SRR0, r0
	ld	r10, HSTATE_HOST_MSR(r13)
	rldimi	r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtspr	SPRN_SRR1, r10
	RFI_TO_KERNEL
	b	.

secondary_too_late:
	li	r12, 0
	stw	r12, STACK_SLOT_TRAP(r1)
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	std	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	mfdar	r3
	mfdsisr	r4
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
	/* always exit if we're running a nested guest */
	ld	r0, VCPU_NESTED(r9)
	cmpdi	r0, 0
	bne	guest_exit_cont
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	maybe_reenter_guest
	b	guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne	14f
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	b	guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	beq	hmi_realmode

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzx	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzcix	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */

	/* If we came in through the P9 short path, go back out to C now */
	lwz	r0, STACK_SLOT_SHORT_PATH(r1)
	cmpwi	r0, 0
	bne	guest_exit_short_path

	/* For hash guest, read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	li	r5, 0
	cmpwi	r0, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	slbia
	ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:

guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

6af27c84 | 1538 | /* Increment exit count, poke other threads to exit */ |
57b8daa7 | 1539 | mr r3, r12 |
6af27c84 | 1540 | bl kvmhv_commence_exit |
eddb60fb PM |
1541 | nop |
1542 | ld r9, HSTATE_KVM_VCPU(r13) | |
6af27c84 | 1543 | |
ec257165 PM |
1544 | /* Stop others sending VCPU interrupts to this physical CPU */ |
1545 | li r0, -1 | |
1546 | stw r0, VCPU_CPU(r9) | |
1547 | stw r0, VCPU_THREAD_CPU(r9) | |
1548 | ||
de56a948 | 1549 | /* Save guest CTRL register, set runlatch to 1 */ |
6af27c84 | 1550 | mfspr r6,SPRN_CTRLF |
de56a948 PM |
1551 | stw r6,VCPU_CTRL(r9) |
1552 | andi. r0,r6,1 | |
1553 | bne 4f | |
1554 | ori r6,r6,1 | |
1555 | mtspr SPRN_CTRLT,r6 | |
1556 | 4: | |
de56a948 PM |
1557 | /* |
1558 | * Save the guest PURR/SPURR | |
1559 | */ | |
1560 | mfspr r5,SPRN_PURR | |
1561 | mfspr r6,SPRN_SPURR | |
1562 | ld r7,VCPU_PURR(r9) | |
1563 | ld r8,VCPU_SPURR(r9) | |
1564 | std r5,VCPU_PURR(r9) | |
1565 | std r6,VCPU_SPURR(r9) | |
1566 | subf r5,r7,r5 | |
1567 | subf r6,r8,r6 | |
1568 | ||
1569 | /* | |
1570 | * Restore host PURR/SPURR and add guest times | |
1571 | * so that the time in the guest gets accounted. | |
1572 | */ | |
1573 | ld r3,HSTATE_PURR(r13) | |
1574 | ld r4,HSTATE_SPURR(r13) | |
1575 | add r3,r3,r5 | |
1576 | add r4,r4,r6 | |
1577 | mtspr SPRN_PURR,r3 | |
1578 | mtspr SPRN_SPURR,r4 | |
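/*
 * Sketch of the accounting above: the guest-mode PURR/SPURR deltas
 * are added onto the host values saved at guest entry, so the time
 * spent in the guest shows up in the host's utilization registers:
 *
 *	host_purr  += purr_now  - purr_at_entry;
 *	host_spurr += spurr_now - spurr_at_entry;
 */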
1579 | ||
b005255e MN |
1580 | BEGIN_FTR_SECTION |
1581 | b 8f | |
1582 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | |
b005255e MN |
1583 | /* Save POWER8-specific registers */ |
1584 | mfspr r5, SPRN_IAMR | |
1585 | mfspr r6, SPRN_PSPB | |
1586 | mfspr r7, SPRN_FSCR | |
1587 | std r5, VCPU_IAMR(r9) | |
1588 | stw r6, VCPU_PSPB(r9) | |
1589 | std r7, VCPU_FSCR(r9) | |
1590 | mfspr r5, SPRN_IC | |
b005255e MN |
1591 | mfspr r7, SPRN_TAR |
1592 | std r5, VCPU_IC(r9) | |
b005255e | 1593 | std r7, VCPU_TAR(r9) |
7b490411 | 1594 | mfspr r8, SPRN_EBBHR |
b005255e MN |
1595 | std r8, VCPU_EBBHR(r9) |
1596 | mfspr r5, SPRN_EBBRR | |
1597 | mfspr r6, SPRN_BESCR | |
83677f55 PM |
1598 | mfspr r7, SPRN_PID |
1599 | mfspr r8, SPRN_WORT | |
b005255e MN |
1600 | std r5, VCPU_EBBRR(r9) |
1601 | std r6, VCPU_BESCR(r9) | |
83677f55 PM |
1602 | stw r7, VCPU_GUEST_PID(r9) |
1603 | std r8, VCPU_WORT(r9) | |
1604 | BEGIN_FTR_SECTION | |
b005255e MN |
1605 | mfspr r5, SPRN_TCSCR |
1606 | mfspr r6, SPRN_ACOP | |
83677f55 PM |
1607 | mfspr r7, SPRN_CSIGR |
1608 | mfspr r8, SPRN_TACR | |
b005255e MN |
1609 | std r5, VCPU_TCSCR(r9) |
1610 | std r6, VCPU_ACOP(r9) | |
83677f55 PM |
1611 | std r7, VCPU_CSIGR(r9) |
1612 | std r8, VCPU_TACR(r9) | |
e9cf1e08 PM |
1613 | FTR_SECTION_ELSE |
1614 | mfspr r5, SPRN_TIDR | |
1615 | mfspr r6, SPRN_PSSCR | |
1616 | std r5, VCPU_TID(r9) | |
1617 | rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */ | |
1618 | rotldi r6, r6, 60 | |
1619 | std r6, VCPU_PSSCR(r9) | |
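/*
 * Note on the rldicl/rotldi pair above: together they compute
 * r6 &= PSSCR_GUEST_VIS without loading a mask constant. Rotate
 * left by 4 so the guest-visible bits (top nibble plus low 10
 * bits) become the low 14 bits, keep those 14 bits, then rotate
 * back right by 4 (i.e. left by 60).
 */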
769377f7 PM |
1620 | /* Restore host HFSCR value */ |
1621 | ld r7, STACK_SLOT_HFSCR(r1) | |
1622 | mtspr SPRN_HFSCR, r7 | |
e9cf1e08 | 1623 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) |
ccec4456 PM |
1624 | /* |
1625 | * Restore various registers to 0, where non-zero values | |
1626 | * set by the guest could disrupt the host. | |
1627 | */ | |
1628 | li r0, 0 | |
4c3bb4cc | 1629 | mtspr SPRN_PSPB, r0 |
ccec4456 | 1630 | mtspr SPRN_WORT, r0 |
83677f55 PM |
1631 | BEGIN_FTR_SECTION |
1632 | mtspr SPRN_TCSCR, r0 | |
ccec4456 PM |
1633 | /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ |
1634 | li r0, 1 | |
1635 | sldi r0, r0, 31 | |
1636 | mtspr SPRN_MMCRS, r0 | |
83677f55 | 1637 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
b005255e | 1638 | |
c3c7470c ME |
1639 | /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ |
1640 | ld r8, STACK_SLOT_IAMR(r1) | |
1641 | mtspr SPRN_IAMR, r8 | |
1642 | ||
1643 | 8: /* POWER7 jumps back in here */ |
e0b7ec05 PM |
1644 | mfspr r5,SPRN_AMR |
1645 | mfspr r6,SPRN_UAMOR | |
1646 | std r5,VCPU_AMR(r9) | |
1647 | std r6,VCPU_UAMOR(r9) | |
c3c7470c ME |
1648 | ld r5,STACK_SLOT_AMR(r1) |
1649 | ld r6,STACK_SLOT_UAMOR(r1) | |
1650 | mtspr SPRN_AMR, r5 | |
4c3bb4cc | 1651 | mtspr SPRN_UAMOR, r6 |
e0b7ec05 PM |
1652 | |
1653 | /* Switch DSCR back to host value */ | |
e0b7ec05 PM |
1654 | mfspr r8, SPRN_DSCR |
1655 | ld r7, HSTATE_DSCR(r13) | |
1656 | std r8, VCPU_DSCR(r9) | |
1657 | mtspr SPRN_DSCR, r7 | |
e0b7ec05 PM |
1658 | |
1659 | /* Save non-volatile GPRs */ | |
1660 | std r14, VCPU_GPR(R14)(r9) | |
1661 | std r15, VCPU_GPR(R15)(r9) | |
1662 | std r16, VCPU_GPR(R16)(r9) | |
1663 | std r17, VCPU_GPR(R17)(r9) | |
1664 | std r18, VCPU_GPR(R18)(r9) | |
1665 | std r19, VCPU_GPR(R19)(r9) | |
1666 | std r20, VCPU_GPR(R20)(r9) | |
1667 | std r21, VCPU_GPR(R21)(r9) | |
1668 | std r22, VCPU_GPR(R22)(r9) | |
1669 | std r23, VCPU_GPR(R23)(r9) | |
1670 | std r24, VCPU_GPR(R24)(r9) | |
1671 | std r25, VCPU_GPR(R25)(r9) | |
1672 | std r26, VCPU_GPR(R26)(r9) | |
1673 | std r27, VCPU_GPR(R27)(r9) | |
1674 | std r28, VCPU_GPR(R28)(r9) | |
1675 | std r29, VCPU_GPR(R29)(r9) | |
1676 | std r30, VCPU_GPR(R30)(r9) | |
1677 | std r31, VCPU_GPR(R31)(r9) | |
1678 | ||
1679 | /* Save SPRGs */ | |
1680 | mfspr r3, SPRN_SPRG0 | |
1681 | mfspr r4, SPRN_SPRG1 | |
1682 | mfspr r5, SPRN_SPRG2 | |
1683 | mfspr r6, SPRN_SPRG3 | |
1684 | std r3, VCPU_SPRG0(r9) | |
1685 | std r4, VCPU_SPRG1(r9) | |
1686 | std r5, VCPU_SPRG2(r9) | |
1687 | std r6, VCPU_SPRG3(r9) | |
1688 | ||
1689 | /* save FP state */ | |
1690 | mr r3, r9 | |
1691 | bl kvmppc_save_fp | |
de56a948 | 1692 | |
0a8eccef | 1693 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
4bb3c7a0 PM |
1694 | /* |
1695 | * Branch around the call if both CPU_FTR_TM and | |
1696 | * CPU_FTR_P9_TM_HV_ASSIST are off. | |
1697 | */ | |
0a8eccef | 1698 | BEGIN_FTR_SECTION |
4bb3c7a0 PM |
1699 | b 91f |
1700 | END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |
67f8a8c1 | 1701 | /* |
7854f754 | 1702 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) |
67f8a8c1 | 1703 | */ |
6f597c6b SG |
1704 | mr r3, r9 |
1705 | ld r4, VCPU_MSR(r3) | |
7854f754 | 1706 | li r5, 0 /* don't preserve non-vol regs */ |
7b0e827c | 1707 | bl kvmppc_save_tm_hv |
7854f754 | 1708 | nop |
6f597c6b | 1709 | ld r9, HSTATE_KVM_VCPU(r13) |
4bb3c7a0 | 1710 | 91: |
0a8eccef PM |
1711 | #endif |
1712 | ||
e0b7ec05 PM |
1713 | /* Increment yield count if they have a VPA */ |
1714 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ | |
1715 | cmpdi r8, 0 | |
1716 | beq 25f | |
0865a583 AG |
1717 | li r4, LPPACA_YIELDCOUNT |
1718 | LWZX_BE r3, r8, r4 | |
e0b7ec05 | 1719 | addi r3, r3, 1 |
0865a583 | 1720 | STWX_BE r3, r8, r4 |
e0b7ec05 PM |
1721 | li r3, 1 |
1722 | stb r3, VCPU_VPA_DIRTY(r9) | |
1723 | 25: | |
1724 | /* Save PMU registers if requested */ | |
1725 | /* r8 and cr0.eq are live here */ | |
41f4e631 PM |
1726 | mr r3, r9 |
1727 | li r4, 1 | |
e0b7ec05 | 1728 | beq 21f /* if no VPA, save PMU stuff anyway */ |
41f4e631 PM |
1729 | lbz r4, LPPACA_PMCINUSE(r8) |
1730 | 21: bl kvmhv_save_guest_pmu | |
1731 | ld r9, HSTATE_KVM_VCPU(r13) | |
de56a948 | 1732 | |
e9cf1e08 | 1733 | /* Restore host values of some registers */ |
7ceaa6dc PM |
1734 | BEGIN_FTR_SECTION |
1735 | ld r5, STACK_SLOT_CIABR(r1) | |
1736 | ld r6, STACK_SLOT_DAWR(r1) | |
1737 | ld r7, STACK_SLOT_DAWRX(r1) | |
1738 | mtspr SPRN_CIABR, r5 | |
b53221e7 MN |
1739 | /* |
1740 | * If the DAWR doesn't work, it's ok to write these here as | |
1741 | * these values should always be zero |
1742 | */ | |
7ceaa6dc PM |
1743 | mtspr SPRN_DAWR, r6 |
1744 | mtspr SPRN_DAWRX, r7 | |
1745 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
e9cf1e08 PM |
1746 | BEGIN_FTR_SECTION |
1747 | ld r5, STACK_SLOT_TID(r1) | |
1748 | ld r6, STACK_SLOT_PSSCR(r1) | |
f4c51f84 | 1749 | ld r7, STACK_SLOT_PID(r1) |
e9cf1e08 PM |
1750 | mtspr SPRN_TIDR, r5 |
1751 | mtspr SPRN_PSSCR, r6 | |
f4c51f84 | 1752 | mtspr SPRN_PID, r7 |
e9cf1e08 | 1753 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
a25bd72b BH |
1754 | |
1755 | #ifdef CONFIG_PPC_RADIX_MMU | |
1756 | /* | |
1757 | * Are we running hash or radix? |
1758 | */ | |
67f8a8c1 PM |
1759 | ld r5, VCPU_KVM(r9) |
1760 | lbz r0, KVM_RADIX(r5) | |
1761 | cmpwi cr2, r0, 0 | |
2bf1071a | 1762 | beq cr2, 2f |
a25bd72b | 1763 | |
df158189 PM |
1764 | /* |
1765 | * Radix: do eieio; tlbsync; ptesync sequence in case we | |
1766 | * interrupted the guest between a tlbie and a ptesync. | |
1767 | */ | |
1768 | eieio | |
1769 | tlbsync | |
1770 | ptesync | |
1771 | ||
a25bd72b BH |
1772 | /* Radix: Handle the case where the guest used an illegal PID */ |
1773 | LOAD_REG_ADDR(r4, mmu_base_pid) | |
1774 | lwz r3, VCPU_GUEST_PID(r9) | |
1775 | lwz r5, 0(r4) | |
1776 | cmpw cr0,r3,r5 | |
1777 | blt 2f | |
1778 | ||
1779 | /* | |
1780 | * Illegal PID, the HW might have prefetched and cached in the TLB | |
1781 | * some translations for the LPID 0 / guest PID combination which | |
1782 | * Linux doesn't know about, so we need to flush that PID out of | |
1783 | * the TLB. First we need to set LPIDR to 0 so tlbiel applies to | |
1784 | * the right context. | |
1785 | */ | |
1786 | li r0,0 | |
1787 | mtspr SPRN_LPID,r0 | |
1788 | isync | |
1789 | ||
1790 | /* Then do a congruence class local flush */ | |
1791 | ld r6,VCPU_KVM(r9) | |
1792 | lwz r0,KVM_TLB_SETS(r6) | |
1793 | mtctr r0 | |
1794 | li r7,0x400 /* IS field = 0b01 */ | |
1795 | ptesync | |
1796 | sldi r0,r3,32 /* RS has PID */ | |
1797 | 1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */ | |
1798 | addi r7,r7,0x1000 | |
1799 | bdnz 1b | |
1800 | ptesync | |
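/*
 * Illustrative C sketch of the flush loop above: one tlbiel per
 * congruence class, with RIC=2 (flush PWC and TLB), PRS=1
 * (process-scoped), R=1 (radix); tlbiel() is a hypothetical
 * wrapper for the instruction:
 *
 *	unsigned long rb = 0x400;		// IS = 0b01
 *	unsigned long rs = (u64)guest_pid << 32;
 *	for (i = 0; i < kvm->arch.tlb_sets; i++) {
 *		tlbiel(rb, rs);
 *		rb += 0x1000;			// next set index
 *	}
 */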
1801 | ||
2bf1071a | 1802 | 2: |
a25bd72b | 1803 | #endif /* CONFIG_PPC_RADIX_MMU */ |
e9cf1e08 | 1804 | |
9e368f29 | 1805 | /* |
c17b98cf | 1806 | * POWER7/POWER8 guest -> host partition switch code. |
9e368f29 PM |
1807 | * We don't have to lock against tlbies but we do |
1808 | * have to coordinate the hardware threads. | |
a8b48a4d | 1809 | * Here STACK_SLOT_TRAP(r1) contains the trap number. |
9e368f29 | 1810 | */ |
b6c295df | 1811 | kvmhv_switch_to_host: |
371fefd6 | 1812 | /* Secondary threads wait for primary to do partition switch */ |
6af27c84 | 1813 | ld r5,HSTATE_KVM_VCORE(r13) |
e0b7ec05 PM |
1814 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ |
1815 | lbz r3,HSTATE_PTID(r13) | |
371fefd6 PM |
1816 | cmpwi r3,0 |
1817 | beq 15f | |
1818 | HMT_LOW | |
1819 | 13: lbz r3,VCORE_IN_GUEST(r5) | |
1820 | cmpwi r3,0 | |
1821 | bne 13b | |
1822 | HMT_MEDIUM | |
1823 | b 16f | |
1824 | ||
1825 | /* Primary thread waits for all the secondaries to exit guest */ | |
1826 | 15: lwz r3,VCORE_ENTRY_EXIT(r5) | |
b4deba5c | 1827 | rlwinm r0,r3,32-8,0xff |
371fefd6 PM |
1828 | clrldi r3,r3,56 |
1829 | cmpw r3,r0 | |
1830 | bne 15b | |
1831 | isync | |
1832 | ||
b4deba5c PM |
1833 | /* Did we actually switch to the guest at all? */ |
1834 | lbz r6, VCORE_IN_GUEST(r5) | |
1835 | cmpwi r6, 0 | |
1836 | beq 19f | |
1837 | ||
371fefd6 | 1838 | /* Primary thread switches back to host partition */ |
de56a948 | 1839 | lwz r7,KVM_HOST_LPID(r4) |
7a84084c PM |
1840 | BEGIN_FTR_SECTION |
1841 | ld r6,KVM_HOST_SDR1(r4) | |
de56a948 PM |
1842 | li r8,LPID_RSVD /* switch to reserved LPID */ |
1843 | mtspr SPRN_LPID,r8 | |
1844 | ptesync | |
7a84084c PM |
1845 | mtspr SPRN_SDR1,r6 /* switch to host page table */ |
1846 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | |
de56a948 PM |
1847 | mtspr SPRN_LPID,r7 |
1848 | isync | |
93b0f4dc | 1849 | |
b005255e | 1850 | BEGIN_FTR_SECTION |
88b02cf9 | 1851 | /* DPDES and VTB are shared between threads */ |
b005255e | 1852 | mfspr r7, SPRN_DPDES |
88b02cf9 | 1853 | mfspr r8, SPRN_VTB |
b005255e | 1854 | std r7, VCORE_DPDES(r5) |
88b02cf9 | 1855 | std r8, VCORE_VTB(r5) |
b005255e MN |
1856 | /* clear DPDES so we don't get guest doorbells in the host */ |
1857 | li r8, 0 | |
1858 | mtspr SPRN_DPDES, r8 | |
1859 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
1860 | ||
93b0f4dc | 1861 | /* Subtract timebase offset from timebase */ |
57b8daa7 | 1862 | ld r8, VCORE_TB_OFFSET_APPL(r5) |
93b0f4dc PM |
1863 | cmpdi r8,0 |
1864 | beq 17f | |
57b8daa7 PM |
1865 | li r0, 0 |
1866 | std r0, VCORE_TB_OFFSET_APPL(r5) | |
c5fb80d3 | 1867 | mftb r6 /* current guest timebase */ |
93b0f4dc PM |
1868 | subf r8,r8,r6 |
1869 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | |
1870 | mftb r7 /* check if lower 24 bits overflowed */ | |
1871 | clrldi r6,r6,40 | |
1872 | clrldi r7,r7,40 | |
1873 | cmpld r7,r6 | |
1874 | bge 17f | |
1875 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ | |
1876 | mtspr SPRN_TBU40,r8 | |
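/*
 * Sketch of the overflow fixup above: mtspr(TBU40) only sets the
 * upper 40 bits, and the low 24 bits keep ticking while we do it.
 * If they wrapped past the value sampled in r6, the upper bits
 * came out one unit low, so:
 *
 *	if ((tb_after & 0xffffff) < (tb_sampled & 0xffffff))
 *		tbu40_val += 1ul << 24;	// the addis r8,r8,0x100
 */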
1877 | ||
df709a29 PM |
1878 | 17: |
1879 | /* | |
1880 | * If this is an HMI, we called kvmppc_realmode_hmi_handler | |
1881 | * above, which may or may not have already called | |
1882 | * kvmppc_subcore_exit_guest. Fortunately, all that | |
1883 | * kvmppc_subcore_exit_guest does is clear a flag, so calling | |
1884 | * it again here is benign even if kvmppc_realmode_hmi_handler | |
1885 | * has already called it. | |
1886 | */ | |
1887 | bl kvmppc_subcore_exit_guest | |
fd7bacbc MS |
1888 | nop |
1889 | 30: ld r5,HSTATE_KVM_VCORE(r13) | |
1890 | ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ | |
1891 | ||
388cc6e1 | 1892 | /* Reset PCR */ |
fd7bacbc | 1893 | ld r0, VCORE_PCR(r5) |
388cc6e1 PM |
1894 | cmpdi r0, 0 |
1895 | beq 18f | |
1896 | li r0, 0 | |
1897 | mtspr SPRN_PCR, r0 | |
1898 | 18: | |
93b0f4dc | 1899 | /* Signal secondary CPUs to continue */ |
371fefd6 | 1900 | stb r0,VCORE_IN_GUEST(r5) |
b4deba5c | 1901 | 19: lis r8,0x7fff /* MAX_INT@h */ |
de56a948 PM |
1902 | mtspr SPRN_HDEC,r8 |
1903 | ||
c0101509 PM |
1904 | 16: |
1905 | BEGIN_FTR_SECTION | |
1906 | /* On POWER9 with HPT-on-radix we need to wait for all other threads */ | |
1907 | ld r3, HSTATE_SPLIT_MODE(r13) | |
1908 | cmpdi r3, 0 | |
1909 | beq 47f | |
1910 | lwz r8, KVM_SPLIT_DO_RESTORE(r3) | |
1911 | cmpwi r8, 0 | |
1912 | beq 47f | |
c0101509 PM |
1913 | bl kvmhv_p9_restore_lpcr |
1914 | nop | |
c0101509 PM |
1915 | b 48f |
1916 | 47: | |
1917 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
1918 | ld r8,KVM_HOST_LPCR(r4) | |
de56a948 PM |
1919 | mtspr SPRN_LPCR,r8 |
1920 | isync | |
c0101509 | 1921 | 48: |
b6c295df PM |
1922 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
1923 | /* Finish timing, if we have a vcpu */ | |
1924 | ld r4, HSTATE_KVM_VCPU(r13) | |
1925 | cmpdi r4, 0 | |
1926 | li r3, 0 | |
1927 | beq 2f | |
1928 | bl kvmhv_accumulate_time | |
1929 | 2: | |
1930 | #endif | |
44a3add8 PM |
1931 | /* Unset guest mode */ |
1932 | li r0, KVM_GUEST_MODE_NONE | |
1933 | stb r0, HSTATE_IN_GUEST(r13) | |
1934 | ||
a8b48a4d | 1935 | lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */ |
7ceaa6dc PM |
1936 | ld r0, SFS+PPC_LR_STKOFF(r1) |
1937 | addi r1, r1, SFS | |
218309b7 PM |
1938 | mtlr r0 |
1939 | blr | |
b4072df4 | 1940 | |
df709a29 PM |
1941 | kvmppc_guest_external: |
1942 | /* External interrupt: first check for host_ipi. If it is |
1943 | * set, we know the host wants us out, so let's do it now |
1944 | */ | |
1945 | bl kvmppc_read_intr | |
1946 | ||
1947 | /* | |
1948 | * Restore the active volatile registers after returning from | |
1949 | * a C function. | |
1950 | */ | |
1951 | ld r9, HSTATE_KVM_VCPU(r13) | |
1952 | li r12, BOOK3S_INTERRUPT_EXTERNAL | |
1953 | ||
1954 | /* | |
1955 | * kvmppc_read_intr return codes: | |
1956 | * | |
1957 | * Exit to host (r3 > 0) | |
1958 | * 1 An interrupt is pending that needs to be handled by the host | |
1959 | * Exit guest and return to host by branching to guest_exit_cont | |
1960 | * | |
1961 | * 2 Passthrough that needs completion in the host | |
1962 | * Exit guest and return to host by branching to guest_exit_cont | |
1963 | * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD | |
1964 | * to indicate to the host to complete handling the interrupt | |
1965 | * | |
1966 | * Before returning to the guest, we check if any CPU is heading out |
1967 | * to the host and if so, we head out also. If no CPU is heading out, |
1968 | * the return values <= 0 below apply. |
1969 | * | |
1970 | * Return to guest (r3 <= 0) | |
1971 | * 0 No external interrupt is pending | |
1972 | * -1 A guest wakeup IPI (which has now been cleared) | |
1973 | * In either case, we return to guest to deliver any pending | |
1974 | * guest interrupts. | |
1975 | * | |
1976 | * -2 A PCI passthrough external interrupt was handled | |
1977 | * (interrupt was delivered directly to guest) | |
1978 | * Return to guest to deliver any pending guest interrupts. | |
1979 | */ | |
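/*
 * Sketch of the dispatch below on kvmppc_read_intr()'s result:
 *
 *	if (rc == 2) {			// passthrough, finish in host
 *		trap = BOOK3S_INTERRUPT_HV_RM_HARD;
 *		goto guest_exit_cont;
 *	} else if (rc == 1) {		// host must handle it
 *		goto guest_exit_cont;
 *	} else {			// rc <= 0
 *		goto maybe_reenter_guest;
 *	}
 */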
1980 | ||
1981 | cmpdi r3, 1 | |
1982 | ble 1f | |
1983 | ||
1984 | /* Return code = 2 */ | |
1985 | li r12, BOOK3S_INTERRUPT_HV_RM_HARD | |
1986 | stw r12, VCPU_TRAP(r9) | |
1987 | b guest_exit_cont | |
1988 | ||
1989 | 1: /* Return code <= 1 */ | |
1990 | cmpdi r3, 0 | |
1991 | bgt guest_exit_cont | |
1992 | ||
1993 | /* Return code <= 0 */ | |
1994 | maybe_reenter_guest: | |
1995 | ld r5, HSTATE_KVM_VCORE(r13) | |
1996 | lwz r0, VCORE_ENTRY_EXIT(r5) | |
1997 | cmpwi r0, 0x100 | |
1998 | mr r4, r9 | |
1999 | blt deliver_guest_interrupt | |
2000 | b guest_exit_cont | |
2001 | ||
4bb3c7a0 PM |
2002 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
2003 | /* | |
2004 | * Softpatch interrupt for transactional memory emulation cases | |
2005 | * on POWER9 DD2.2. This is early in the guest exit path - we | |
2006 | * haven't saved registers or done a treclaim yet. | |
2007 | */ | |
2008 | kvmppc_tm_emul: | |
2009 | /* Save instruction image in HEIR */ | |
2010 | mfspr r3, SPRN_HEIR | |
2011 | stw r3, VCPU_HEIR(r9) | |
2012 | ||
2013 | /* | |
2014 | * The cases we want to handle here are those where the guest | |
2015 | * is in real suspend mode and is trying to transition to | |
2016 | * transactional mode. | |
2017 | */ | |
2018 | lbz r0, HSTATE_FAKE_SUSPEND(r13) | |
2019 | cmpwi r0, 0 /* keep exiting guest if in fake suspend */ | |
2020 | bne guest_exit_cont | |
2021 | rldicl r3, r11, 64 - MSR_TS_S_LG, 62 | |
2022 | cmpwi r3, 1 /* or if not in suspend state */ | |
2023 | bne guest_exit_cont | |
2024 | ||
2025 | /* Call C code to do the emulation */ | |
2026 | mr r3, r9 | |
2027 | bl kvmhv_p9_tm_emulation_early | |
2028 | nop | |
2029 | ld r9, HSTATE_KVM_VCPU(r13) | |
2030 | li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH | |
2031 | cmpwi r3, 0 | |
2032 | beq guest_exit_cont /* continue exiting if not handled */ | |
2033 | ld r10, VCPU_PC(r9) | |
2034 | ld r11, VCPU_MSR(r9) | |
2035 | b fast_interrupt_c_return /* go back to guest if handled */ | |
2036 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | |
2037 | ||
697d3899 PM |
2038 | /* |
2039 | * Check whether an HDSI is an HPTE not found fault or something else. | |
2040 | * If it is an HPTE not found fault that is due to the guest accessing | |
2041 | * a page that they have mapped but which we have paged out, then | |
2042 | * we continue on with the guest exit path. In all other cases, | |
2043 | * reflect the HDSI to the guest as a DSI. | |
2044 | */ | |
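/*
 * Decision sketch for the hash-guest path below (radix guests just
 * save DAR/DSISR/ASDR and continue the exit); ret is the value
 * returned by kvmppc_hpte_hv_fault():
 *
 *	if (!(dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)))
 *		reflect as DSI to the guest;
 *	else if (ret == 0)
 *		retry the instruction;
 *	else if (ret == -1)
 *		exit to the host kernel;
 *	else if (ret == -2)
 *		fetch the instruction word for MMIO emulation;
 *	else
 *		reflect as DSI with DSISR = ret;
 */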
2045 | kvmppc_hdsi: | |
f4c51f84 PM |
2046 | ld r3, VCPU_KVM(r9) |
2047 | lbz r0, KVM_RADIX(r3) | |
697d3899 PM |
2048 | mfspr r4, SPRN_HDAR |
2049 | mfspr r6, SPRN_HDSISR | |
e001fa78 MN |
2050 | BEGIN_FTR_SECTION |
2051 | /* Look for DSISR canary. If we find it, retry instruction */ | |
2052 | cmpdi r6, 0x7fff | |
2053 | beq 6f | |
2054 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
2055 | cmpwi r0, 0 | |
f4c51f84 | 2056 | bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */ |
4cf302bc PM |
2057 | /* HPTE not found fault or protection fault? */ |
2058 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h | |
697d3899 | 2059 | beq 1f /* if not, send it to the guest */ |
4e5acdc2 PM |
2060 | andi. r0, r11, MSR_DR /* data relocation enabled? */ |
2061 | beq 3f | |
ef8c640c PM |
2062 | BEGIN_FTR_SECTION |
2063 | mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ | |
2064 | b 4f | |
2065 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
697d3899 | 2066 | clrrdi r0, r4, 28 |
c75df6f9 | 2067 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
cf29b215 PM |
2068 | li r0, BOOK3S_INTERRUPT_DATA_SEGMENT |
2069 | bne 7f /* if no SLB entry found */ | |
697d3899 PM |
2070 | 4: std r4, VCPU_FAULT_DAR(r9) |
2071 | stw r6, VCPU_FAULT_DSISR(r9) | |
2072 | ||
2073 | /* Search the hash table. */ | |
2074 | mr r3, r9 /* vcpu pointer */ | |
342d3db7 | 2075 | li r7, 1 /* data fault */ |
b1576fec | 2076 | bl kvmppc_hpte_hv_fault |
697d3899 PM |
2077 | ld r9, HSTATE_KVM_VCPU(r13) |
2078 | ld r10, VCPU_PC(r9) | |
2079 | ld r11, VCPU_MSR(r9) | |
2080 | li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE | |
2081 | cmpdi r3, 0 /* retry the instruction */ | |
2082 | beq 6f | |
2083 | cmpdi r3, -1 /* handle in kernel mode */ | |
b4072df4 | 2084 | beq guest_exit_cont |
697d3899 PM |
2085 | cmpdi r3, -2 /* MMIO emulation; need instr word */ |
2086 | beq 2f | |
2087 | ||
cf29b215 | 2088 | /* Synthesize a DSI (or DSegI) for the guest */ |
697d3899 PM |
2089 | ld r4, VCPU_FAULT_DAR(r9) |
2090 | mr r6, r3 | |
cf29b215 | 2091 | 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE |
697d3899 | 2092 | mtspr SPRN_DSISR, r6 |
cf29b215 | 2093 | 7: mtspr SPRN_DAR, r4 |
697d3899 PM |
2094 | mtspr SPRN_SRR0, r10 |
2095 | mtspr SPRN_SRR1, r11 | |
cf29b215 | 2096 | mr r10, r0 |
e4e38121 | 2097 | bl kvmppc_msr_interrupt |
b4072df4 | 2098 | fast_interrupt_c_return: |
697d3899 | 2099 | 6: ld r7, VCPU_CTR(r9) |
c63517c2 | 2100 | ld r8, VCPU_XER(r9) |
697d3899 PM |
2101 | mtctr r7 |
2102 | mtxer r8 | |
2103 | mr r4, r9 | |
2104 | b fast_guest_return | |
2105 | ||
2106 | 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ | |
2107 | ld r5, KVM_VRMA_SLB_V(r5) | |
2108 | b 4b | |
2109 | ||
2110 | /* If this is for emulated MMIO, load the instruction word */ | |
2111 | 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ | |
2112 | ||
2113 | /* Set guest mode to 'jump over instruction' so if lwz faults | |
2114 | * we'll just continue at the next IP. */ | |
2115 | li r0, KVM_GUEST_MODE_SKIP | |
2116 | stb r0, HSTATE_IN_GUEST(r13) | |
2117 | ||
2118 | /* Do the access with MSR:DR enabled */ | |
2119 | mfmsr r3 | |
2120 | ori r4, r3, MSR_DR /* Enable paging for data */ | |
2121 | mtmsrd r4 | |
2122 | lwz r8, 0(r10) | |
2123 | mtmsrd r3 | |
2124 | ||
2125 | /* Store the result */ | |
2126 | stw r8, VCPU_LAST_INST(r9) | |
2127 | ||
2128 | /* Unset guest mode. */ | |
44a3add8 | 2129 | li r0, KVM_GUEST_MODE_HOST_HV |
697d3899 | 2130 | stb r0, HSTATE_IN_GUEST(r13) |
b4072df4 | 2131 | b guest_exit_cont |
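/*
 * Sketch of the guarded fetch above: a short MSR[DR] window lets
 * the lwz go through the guest translation, and KVM_GUEST_MODE_SKIP
 * makes a fault in that window skip the load instead of recursing:
 *
 *	u32 inst = KVM_INST_FETCH_FAILED;
 *	hstate->in_guest = KVM_GUEST_MODE_SKIP;
 *	mtmsrd(msr | MSR_DR);
 *	inst = *(u32 *)guest_pc;	// may fault harmlessly
 *	mtmsrd(msr);
 *	vcpu->arch.last_inst = inst;
 *	hstate->in_guest = KVM_GUEST_MODE_HOST_HV;
 */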
de56a948 | 2132 | |
f4c51f84 PM |
2133 | .Lradix_hdsi: |
2134 | std r4, VCPU_FAULT_DAR(r9) | |
2135 | stw r6, VCPU_FAULT_DSISR(r9) | |
2136 | .Lradix_hisi: | |
2137 | mfspr r5, SPRN_ASDR | |
2138 | std r5, VCPU_FAULT_GPA(r9) | |
2139 | b guest_exit_cont | |
2140 | ||
342d3db7 PM |
2141 | /* |
2142 | * Similarly for an HISI, reflect it to the guest as an ISI unless | |
2143 | * it is an HPTE not found fault for a page that we have paged out. | |
2144 | */ | |
2145 | kvmppc_hisi: | |
f4c51f84 PM |
2146 | ld r3, VCPU_KVM(r9) |
2147 | lbz r0, KVM_RADIX(r3) | |
2148 | cmpwi r0, 0 | |
2149 | bne .Lradix_hisi /* for radix, just save ASDR */ | |
342d3db7 PM |
2150 | andis. r0, r11, SRR1_ISI_NOPT@h |
2151 | beq 1f | |
4e5acdc2 PM |
2152 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ |
2153 | beq 3f | |
ef8c640c PM |
2154 | BEGIN_FTR_SECTION |
2155 | mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */ | |
2156 | b 4f | |
2157 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
342d3db7 | 2158 | clrrdi r0, r10, 28 |
c75df6f9 | 2159 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
cf29b215 PM |
2160 | li r0, BOOK3S_INTERRUPT_INST_SEGMENT |
2161 | bne 7f /* if no SLB entry found */ | |
342d3db7 PM |
2162 | 4: |
2163 | /* Search the hash table. */ | |
2164 | mr r3, r9 /* vcpu pointer */ | |
2165 | mr r4, r10 | |
2166 | mr r6, r11 | |
2167 | li r7, 0 /* instruction fault */ | |
b1576fec | 2168 | bl kvmppc_hpte_hv_fault |
342d3db7 PM |
2169 | ld r9, HSTATE_KVM_VCPU(r13) |
2170 | ld r10, VCPU_PC(r9) | |
2171 | ld r11, VCPU_MSR(r9) | |
2172 | li r12, BOOK3S_INTERRUPT_H_INST_STORAGE | |
2173 | cmpdi r3, 0 /* retry the instruction */ | |
b4072df4 | 2174 | beq fast_interrupt_c_return |
342d3db7 | 2175 | cmpdi r3, -1 /* handle in kernel mode */ |
b4072df4 | 2176 | beq guest_exit_cont |
342d3db7 | 2177 | |
cf29b215 | 2178 | /* Synthesize an ISI (or ISegI) for the guest */ |
342d3db7 | 2179 | mr r11, r3 |
cf29b215 PM |
2180 | 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE |
2181 | 7: mtspr SPRN_SRR0, r10 | |
342d3db7 | 2182 | mtspr SPRN_SRR1, r11 |
cf29b215 | 2183 | mr r10, r0 |
e4e38121 | 2184 | bl kvmppc_msr_interrupt |
b4072df4 | 2185 | b fast_interrupt_c_return |
342d3db7 PM |
2186 | |
2187 | 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ | |
2188 | ld r5, KVM_VRMA_SLB_V(r6) | |
2189 | b 4b | |
2190 | ||
a8606e20 PM |
2191 | /* |
2192 | * Try to handle an hcall in real mode. | |
2193 | * Returns to the guest if we handle it, or continues on up to | |
2194 | * the kernel if we can't (i.e. if we don't have a handler for | |
2195 | * it, or if the handler returns H_TOO_HARD). | |
1f09c3ed PM |
2196 | * |
2197 | * r5 - r8 contain hcall args, | |
2198 | * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca | |
a8606e20 | 2199 | */ |
a8606e20 | 2200 | hcall_try_real_mode: |
c75df6f9 | 2201 | ld r3,VCPU_GPR(R3)(r9) |
a8606e20 | 2202 | andi. r0,r11,MSR_PR |
27025a60 LPF |
2203 | /* sc 1 from userspace - reflect to guest syscall */ |
2204 | bne sc_1_fast_return | |
360cae31 PM |
2205 | /* sc 1 from nested guest - give it to L1 to handle */ |
2206 | ld r0, VCPU_NESTED(r9) | |
2207 | cmpdi r0, 0 | |
2208 | bne guest_exit_cont | |
a8606e20 PM |
2209 | clrrdi r3,r3,2 |
2210 | cmpldi r3,hcall_real_table_end - hcall_real_table | |
b4072df4 | 2211 | bge guest_exit_cont |
699a0ea0 PM |
2212 | /* See if this hcall is enabled for in-kernel handling */ |
2213 | ld r4, VCPU_KVM(r9) | |
2214 | srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ | |
2215 | sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ | |
2216 | add r4, r4, r0 | |
2217 | ld r0, KVM_ENABLED_HCALLS(r4) | |
2218 | rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ | |
2219 | srd r0, r0, r4 | |
2220 | andi. r0, r0, 1 | |
2221 | beq guest_exit_cont | |
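/*
 * Sketch of the bitmap test above: hcall numbers are multiples of
 * 4, so idx = req / 4 gives one bit per possible hcall in
 * kvm->arch.enabled_hcalls[]:
 *
 *	unsigned int idx = req / 4;
 *	if (!((kvm->arch.enabled_hcalls[idx / 64] >> (idx % 64)) & 1))
 *		goto guest_exit_cont;	// not handled in real mode
 */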
2222 | /* Get pointer to handler, if any, and call it */ | |
a8606e20 | 2223 | LOAD_REG_ADDR(r4, hcall_real_table) |
4baa1d87 | 2224 | lwax r3,r3,r4 |
a8606e20 | 2225 | cmpwi r3,0 |
b4072df4 | 2226 | beq guest_exit_cont |
05a308c7 AB |
2227 | add r12,r3,r4 |
2228 | mtctr r12 | |
a8606e20 | 2229 | mr r3,r9 /* get vcpu pointer */ |
c75df6f9 | 2230 | ld r4,VCPU_GPR(R4)(r9) |
a8606e20 PM |
2231 | bctrl |
2232 | cmpdi r3,H_TOO_HARD | |
2233 | beq hcall_real_fallback | |
2234 | ld r4,HSTATE_KVM_VCPU(r13) | |
c75df6f9 | 2235 | std r3,VCPU_GPR(R3)(r4) |
a8606e20 PM |
2236 | ld r10,VCPU_PC(r4) |
2237 | ld r11,VCPU_MSR(r4) | |
2238 | b fast_guest_return | |
2239 | ||
27025a60 LPF |
2240 | sc_1_fast_return: |
2241 | mtspr SPRN_SRR0,r10 | |
2242 | mtspr SPRN_SRR1,r11 | |
2243 | li r10, BOOK3S_INTERRUPT_SYSCALL | |
e4e38121 | 2244 | bl kvmppc_msr_interrupt |
27025a60 LPF |
2245 | mr r4,r9 |
2246 | b fast_guest_return | |
2247 | ||
a8606e20 PM |
2248 | /* We've attempted a real mode hcall, but the handler has punted |
2249 | * it back to userspace. We need to restore some clobbered volatiles |
2250 | * before resuming the pass-it-to-qemu path */ | |
2251 | hcall_real_fallback: | |
2252 | li r12,BOOK3S_INTERRUPT_SYSCALL | |
2253 | ld r9, HSTATE_KVM_VCPU(r13) | |
a8606e20 | 2254 | |
b4072df4 | 2255 | b guest_exit_cont |
a8606e20 PM |
2256 | |
2257 | .globl hcall_real_table | |
2258 | hcall_real_table: | |
2259 | .long 0 /* 0 - unused */ | |
c1fb0194 AB |
2260 | .long DOTSYM(kvmppc_h_remove) - hcall_real_table |
2261 | .long DOTSYM(kvmppc_h_enter) - hcall_real_table | |
2262 | .long DOTSYM(kvmppc_h_read) - hcall_real_table | |
cdeee518 PM |
2263 | .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table |
2264 | .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table | |
c1fb0194 | 2265 | .long DOTSYM(kvmppc_h_protect) - hcall_real_table |
e40542af | 2266 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
c1fb0194 | 2267 | .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table |
31217db7 | 2268 | .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table |
e40542af JN |
2269 | #else |
2270 | .long 0 /* 0x1c */ | |
2271 | .long 0 /* 0x20 */ | |
2272 | #endif | |
a8606e20 | 2273 | .long 0 /* 0x24 - H_SET_SPRG0 */ |
c1fb0194 | 2274 | .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table |
eadfb1c5 | 2275 | .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table |
a8606e20 PM |
2276 | .long 0 /* 0x30 */ |
2277 | .long 0 /* 0x34 */ | |
2278 | .long 0 /* 0x38 */ | |
2279 | .long 0 /* 0x3c */ | |
2280 | .long 0 /* 0x40 */ | |
2281 | .long 0 /* 0x44 */ | |
2282 | .long 0 /* 0x48 */ | |
2283 | .long 0 /* 0x4c */ | |
2284 | .long 0 /* 0x50 */ | |
2285 | .long 0 /* 0x54 */ | |
2286 | .long 0 /* 0x58 */ | |
2287 | .long 0 /* 0x5c */ | |
2288 | .long 0 /* 0x60 */ | |
e7d26f28 | 2289 | #ifdef CONFIG_KVM_XICS |
c1fb0194 AB |
2290 | .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table |
2291 | .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table | |
2292 | .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table | |
5af50993 | 2293 | .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table |
c1fb0194 | 2294 | .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table |
e7d26f28 BH |
2295 | #else |
2296 | .long 0 /* 0x64 - H_EOI */ | |
2297 | .long 0 /* 0x68 - H_CPPR */ | |
2298 | .long 0 /* 0x6c - H_IPI */ | |
2299 | .long 0 /* 0x70 - H_IPOLL */ | |
2300 | .long 0 /* 0x74 - H_XIRR */ | |
2301 | #endif | |
a8606e20 PM |
2302 | .long 0 /* 0x78 */ |
2303 | .long 0 /* 0x7c */ | |
2304 | .long 0 /* 0x80 */ | |
2305 | .long 0 /* 0x84 */ | |
2306 | .long 0 /* 0x88 */ | |
2307 | .long 0 /* 0x8c */ | |
2308 | .long 0 /* 0x90 */ | |
2309 | .long 0 /* 0x94 */ | |
2310 | .long 0 /* 0x98 */ | |
2311 | .long 0 /* 0x9c */ | |
2312 | .long 0 /* 0xa0 */ | |
2313 | .long 0 /* 0xa4 */ | |
2314 | .long 0 /* 0xa8 */ | |
2315 | .long 0 /* 0xac */ | |
2316 | .long 0 /* 0xb0 */ | |
2317 | .long 0 /* 0xb4 */ | |
2318 | .long 0 /* 0xb8 */ | |
2319 | .long 0 /* 0xbc */ | |
2320 | .long 0 /* 0xc0 */ | |
2321 | .long 0 /* 0xc4 */ | |
2322 | .long 0 /* 0xc8 */ | |
2323 | .long 0 /* 0xcc */ | |
2324 | .long 0 /* 0xd0 */ | |
2325 | .long 0 /* 0xd4 */ | |
2326 | .long 0 /* 0xd8 */ | |
2327 | .long 0 /* 0xdc */ | |
c1fb0194 | 2328 | .long DOTSYM(kvmppc_h_cede) - hcall_real_table |
90fd09f8 | 2329 | .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table |
a8606e20 PM |
2330 | .long 0 /* 0xe8 */ |
2331 | .long 0 /* 0xec */ | |
2332 | .long 0 /* 0xf0 */ | |
2333 | .long 0 /* 0xf4 */ | |
2334 | .long 0 /* 0xf8 */ | |
2335 | .long 0 /* 0xfc */ | |
2336 | .long 0 /* 0x100 */ | |
2337 | .long 0 /* 0x104 */ | |
2338 | .long 0 /* 0x108 */ | |
2339 | .long 0 /* 0x10c */ | |
2340 | .long 0 /* 0x110 */ | |
2341 | .long 0 /* 0x114 */ | |
2342 | .long 0 /* 0x118 */ | |
2343 | .long 0 /* 0x11c */ | |
2344 | .long 0 /* 0x120 */ | |
c1fb0194 | 2345 | .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table |
8563bf52 PM |
2346 | .long 0 /* 0x128 */ |
2347 | .long 0 /* 0x12c */ | |
2348 | .long 0 /* 0x130 */ | |
c1fb0194 | 2349 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table |
e40542af | 2350 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
31217db7 | 2351 | .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table |
d3695aa4 | 2352 | .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table |
e40542af JN |
2353 | #else |
2354 | .long 0 /* 0x138 */ | |
2355 | .long 0 /* 0x13c */ | |
2356 | #endif | |
e928e9cb ME |
2357 | .long 0 /* 0x140 */ |
2358 | .long 0 /* 0x144 */ | |
2359 | .long 0 /* 0x148 */ | |
2360 | .long 0 /* 0x14c */ | |
2361 | .long 0 /* 0x150 */ | |
2362 | .long 0 /* 0x154 */ | |
2363 | .long 0 /* 0x158 */ | |
2364 | .long 0 /* 0x15c */ | |
2365 | .long 0 /* 0x160 */ | |
2366 | .long 0 /* 0x164 */ | |
2367 | .long 0 /* 0x168 */ | |
2368 | .long 0 /* 0x16c */ | |
2369 | .long 0 /* 0x170 */ | |
2370 | .long 0 /* 0x174 */ | |
2371 | .long 0 /* 0x178 */ | |
2372 | .long 0 /* 0x17c */ | |
2373 | .long 0 /* 0x180 */ | |
2374 | .long 0 /* 0x184 */ | |
2375 | .long 0 /* 0x188 */ | |
2376 | .long 0 /* 0x18c */ | |
2377 | .long 0 /* 0x190 */ | |
2378 | .long 0 /* 0x194 */ | |
2379 | .long 0 /* 0x198 */ | |
2380 | .long 0 /* 0x19c */ | |
2381 | .long 0 /* 0x1a0 */ | |
2382 | .long 0 /* 0x1a4 */ | |
2383 | .long 0 /* 0x1a8 */ | |
2384 | .long 0 /* 0x1ac */ | |
2385 | .long 0 /* 0x1b0 */ | |
2386 | .long 0 /* 0x1b4 */ | |
2387 | .long 0 /* 0x1b8 */ | |
2388 | .long 0 /* 0x1bc */ | |
2389 | .long 0 /* 0x1c0 */ | |
2390 | .long 0 /* 0x1c4 */ | |
2391 | .long 0 /* 0x1c8 */ | |
2392 | .long 0 /* 0x1cc */ | |
2393 | .long 0 /* 0x1d0 */ | |
2394 | .long 0 /* 0x1d4 */ | |
2395 | .long 0 /* 0x1d8 */ | |
2396 | .long 0 /* 0x1dc */ | |
2397 | .long 0 /* 0x1e0 */ | |
2398 | .long 0 /* 0x1e4 */ | |
2399 | .long 0 /* 0x1e8 */ | |
2400 | .long 0 /* 0x1ec */ | |
2401 | .long 0 /* 0x1f0 */ | |
2402 | .long 0 /* 0x1f4 */ | |
2403 | .long 0 /* 0x1f8 */ | |
2404 | .long 0 /* 0x1fc */ | |
2405 | .long 0 /* 0x200 */ | |
2406 | .long 0 /* 0x204 */ | |
2407 | .long 0 /* 0x208 */ | |
2408 | .long 0 /* 0x20c */ | |
2409 | .long 0 /* 0x210 */ | |
2410 | .long 0 /* 0x214 */ | |
2411 | .long 0 /* 0x218 */ | |
2412 | .long 0 /* 0x21c */ | |
2413 | .long 0 /* 0x220 */ | |
2414 | .long 0 /* 0x224 */ | |
2415 | .long 0 /* 0x228 */ | |
2416 | .long 0 /* 0x22c */ | |
2417 | .long 0 /* 0x230 */ | |
2418 | .long 0 /* 0x234 */ | |
2419 | .long 0 /* 0x238 */ | |
2420 | .long 0 /* 0x23c */ | |
2421 | .long 0 /* 0x240 */ | |
2422 | .long 0 /* 0x244 */ | |
2423 | .long 0 /* 0x248 */ | |
2424 | .long 0 /* 0x24c */ | |
2425 | .long 0 /* 0x250 */ | |
2426 | .long 0 /* 0x254 */ | |
2427 | .long 0 /* 0x258 */ | |
2428 | .long 0 /* 0x25c */ | |
2429 | .long 0 /* 0x260 */ | |
2430 | .long 0 /* 0x264 */ | |
2431 | .long 0 /* 0x268 */ | |
2432 | .long 0 /* 0x26c */ | |
2433 | .long 0 /* 0x270 */ | |
2434 | .long 0 /* 0x274 */ | |
2435 | .long 0 /* 0x278 */ | |
2436 | .long 0 /* 0x27c */ | |
2437 | .long 0 /* 0x280 */ | |
2438 | .long 0 /* 0x284 */ | |
2439 | .long 0 /* 0x288 */ | |
2440 | .long 0 /* 0x28c */ | |
2441 | .long 0 /* 0x290 */ | |
2442 | .long 0 /* 0x294 */ | |
2443 | .long 0 /* 0x298 */ | |
2444 | .long 0 /* 0x29c */ | |
2445 | .long 0 /* 0x2a0 */ | |
2446 | .long 0 /* 0x2a4 */ | |
2447 | .long 0 /* 0x2a8 */ | |
2448 | .long 0 /* 0x2ac */ | |
2449 | .long 0 /* 0x2b0 */ | |
2450 | .long 0 /* 0x2b4 */ | |
2451 | .long 0 /* 0x2b8 */ | |
2452 | .long 0 /* 0x2bc */ | |
2453 | .long 0 /* 0x2c0 */ | |
2454 | .long 0 /* 0x2c4 */ | |
2455 | .long 0 /* 0x2c8 */ | |
2456 | .long 0 /* 0x2cc */ | |
2457 | .long 0 /* 0x2d0 */ | |
2458 | .long 0 /* 0x2d4 */ | |
2459 | .long 0 /* 0x2d8 */ | |
2460 | .long 0 /* 0x2dc */ | |
2461 | .long 0 /* 0x2e0 */ | |
2462 | .long 0 /* 0x2e4 */ | |
2463 | .long 0 /* 0x2e8 */ | |
2464 | .long 0 /* 0x2ec */ | |
2465 | .long 0 /* 0x2f0 */ | |
2466 | .long 0 /* 0x2f4 */ | |
2467 | .long 0 /* 0x2f8 */ | |
5af50993 BH |
2468 | #ifdef CONFIG_KVM_XICS |
2469 | .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table | |
2470 | #else | |
2471 | .long 0 /* 0x2fc - H_XIRR_X*/ | |
2472 | #endif | |
e928e9cb | 2473 | .long DOTSYM(kvmppc_h_random) - hcall_real_table |
ae2113a4 | 2474 | .globl hcall_real_table_end |
a8606e20 PM |
2475 | hcall_real_table_end: |
2476 | ||
8563bf52 | 2477 | _GLOBAL(kvmppc_h_set_xdabr) |
4bad7779 | 2478 | EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr) |
8563bf52 PM |
2479 | andi. r0, r5, DABRX_USER | DABRX_KERNEL |
2480 | beq 6f | |
2481 | li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI | |
2482 | andc. r0, r5, r0 | |
2483 | beq 3f | |
2484 | 6: li r3, H_PARAMETER | |
2485 | blr | |
2486 | ||
a8606e20 | 2487 | _GLOBAL(kvmppc_h_set_dabr) |
4bad7779 | 2488 | EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr) |
8563bf52 PM |
2489 | li r5, DABRX_USER | DABRX_KERNEL |
2490 | 3: | |
eee7ff9d MN |
2491 | BEGIN_FTR_SECTION |
2492 | b 2f | |
2493 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
a8606e20 | 2494 | std r4,VCPU_DABR(r3) |
8563bf52 PM |
2495 | stw r5, VCPU_DABRX(r3) |
2496 | mtspr SPRN_DABRX, r5 | |
8943633c PM |
2497 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
2498 | 1: mtspr SPRN_DABR,r4 | |
2499 | mfspr r5, SPRN_DABR | |
2500 | cmpd r4, r5 | |
2501 | bne 1b | |
2502 | isync | |
a8606e20 PM |
2503 | li r3,0 |
2504 | blr | |
2505 | ||
e8ebedbf | 2506 | 2: |
c1fe190c MN |
2507 | LOAD_REG_ADDR(r11, dawr_force_enable) |
2508 | lbz r11, 0(r11) | |
2509 | cmpdi r11, 0 | |
fabb2efc | 2510 | bne 3f |
ca9a16c3 | 2511 | li r3, H_HARDWARE |
fabb2efc MN |
2512 | blr |
2513 | 3: | |
8563bf52 | 2514 | /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ |
e8ebedbf | 2515 | rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW |
760a7364 | 2516 | rlwimi r5, r4, 2, DAWRX_WT |
8563bf52 PM |
2517 | clrrdi r4, r4, 3 |
2518 | std r4, VCPU_DAWR(r3) | |
2519 | std r5, VCPU_DAWRX(r3) | |
2520 | mtspr SPRN_DAWR, r4 | |
2521 | mtspr SPRN_DAWRX, r5 | |
2522 | li r3, 0 | |
a8606e20 PM |
2523 | blr |
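/*
 * Sketch of the DABR -> DAWR translation done by the rlwimi pair
 * above (bit positions per the usual reg.h definitions; treat as
 * illustrative):
 *
 *	dawrx  = dabrx;				 // e.g. USER|KERNEL
 *	dawrx |= (dabr & (DABR_DATA_READ | DABR_DATA_WRITE)) << 5;
 *						 // -> DAWRX_DR/_DW
 *	dawrx |= (dabr & DABR_TRANSLATION) << 2; // -> DAWRX_WT
 *	dawr   = dabr & ~7ul;			 // doubleword address
 */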
2524 | ||
1f09c3ed | 2525 | _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ |
19ccb76a PM |
2526 | ori r11,r11,MSR_EE |
2527 | std r11,VCPU_MSR(r3) | |
2528 | li r0,1 | |
2529 | stb r0,VCPU_CEDED(r3) | |
2530 | sync /* order setting ceded vs. testing prodded */ | |
2531 | lbz r5,VCPU_PRODDED(r3) | |
2532 | cmpwi r5,0 | |
04f995a5 | 2533 | bne kvm_cede_prodded |
6af27c84 PM |
2534 | li r12,0 /* set trap to 0 to say hcall is handled */ |
2535 | stw r12,VCPU_TRAP(r3) | |
19ccb76a | 2536 | li r0,H_SUCCESS |
c75df6f9 | 2537 | std r0,VCPU_GPR(R3)(r3) |
19ccb76a PM |
2538 | |
2539 | /* | |
2540 | * Set our bit in the bitmask of napping threads unless all the | |
2541 | * other threads are already napping, in which case we send this | |
2542 | * up to the host. | |
2543 | */ | |
2544 | ld r5,HSTATE_KVM_VCORE(r13) | |
e0b7ec05 | 2545 | lbz r6,HSTATE_PTID(r13) |
19ccb76a PM |
2546 | lwz r8,VCORE_ENTRY_EXIT(r5) |
2547 | clrldi r8,r8,56 | |
2548 | li r0,1 | |
2549 | sld r0,r0,r6 | |
2550 | addi r6,r5,VCORE_NAPPING_THREADS | |
2551 | 31: lwarx r4,0,r6 | |
2552 | or r4,r4,r0 | |
7d6c40da PM |
2553 | cmpw r4,r8 |
2554 | beq kvm_cede_exit | |
19ccb76a PM |
2555 | stwcx. r4,0,r6 |
2556 | bne 31b | |
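/*
 * Sketch of the atomic update above: set our bit in
 * vc->napping_threads unless that would make every entered thread
 * napping (entered_map being the low byte of vc->entry_exit_map):
 *
 *	do {
 *		old = vc->napping_threads;	// lwarx
 *		new = old | (1u << ptid);
 *		if (new == entered_map)
 *			goto kvm_cede_exit;	// hand back to host
 *	} while (!store_conditional(&vc->napping_threads, new));
 */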
7d6c40da | 2557 | /* order napping_threads update vs testing entry_exit_map */ |
f019b7ad | 2558 | isync |
e0b7ec05 | 2559 | li r0,NAPPING_CEDE |
19ccb76a | 2560 | stb r0,HSTATE_NAPPING(r13) |
19ccb76a PM |
2561 | lwz r7,VCORE_ENTRY_EXIT(r5) |
2562 | cmpwi r7,0x100 | |
2563 | bge 33f /* another thread already exiting */ | |
2564 | ||
2565 | /* | |
2566 | * Although not specifically required by the architecture, POWER7 | |
2567 | * preserves the following registers in nap mode, even if an SMT mode | |
2568 | * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, | |
2569 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. | |
2570 | */ | |
2571 | /* Save non-volatile GPRs */ | |
c75df6f9 MN |
2572 | std r14, VCPU_GPR(R14)(r3) |
2573 | std r15, VCPU_GPR(R15)(r3) | |
2574 | std r16, VCPU_GPR(R16)(r3) | |
2575 | std r17, VCPU_GPR(R17)(r3) | |
2576 | std r18, VCPU_GPR(R18)(r3) | |
2577 | std r19, VCPU_GPR(R19)(r3) | |
2578 | std r20, VCPU_GPR(R20)(r3) | |
2579 | std r21, VCPU_GPR(R21)(r3) | |
2580 | std r22, VCPU_GPR(R22)(r3) | |
2581 | std r23, VCPU_GPR(R23)(r3) | |
2582 | std r24, VCPU_GPR(R24)(r3) | |
2583 | std r25, VCPU_GPR(R25)(r3) | |
2584 | std r26, VCPU_GPR(R26)(r3) | |
2585 | std r27, VCPU_GPR(R27)(r3) | |
2586 | std r28, VCPU_GPR(R28)(r3) | |
2587 | std r29, VCPU_GPR(R29)(r3) | |
2588 | std r30, VCPU_GPR(R30)(r3) | |
2589 | std r31, VCPU_GPR(R31)(r3) | |
19ccb76a PM |
2590 | |
2591 | /* save FP state */ | |
595e4f7e | 2592 | bl kvmppc_save_fp |
19ccb76a | 2593 | |
93d17397 | 2594 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
4bb3c7a0 PM |
2595 | /* |
2596 | * Branch around the call if both CPU_FTR_TM and | |
2597 | * CPU_FTR_P9_TM_HV_ASSIST are off. | |
2598 | */ | |
93d17397 | 2599 | BEGIN_FTR_SECTION |
4bb3c7a0 PM |
2600 | b 91f |
2601 | END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |
67f8a8c1 | 2602 | /* |
7854f754 | 2603 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) |
67f8a8c1 | 2604 | */ |
6f597c6b SG |
2605 | ld r3, HSTATE_KVM_VCPU(r13) |
2606 | ld r4, VCPU_MSR(r3) | |
7854f754 | 2607 | li r5, 0 /* don't preserve non-vol regs */ |
7b0e827c | 2608 | bl kvmppc_save_tm_hv |
7854f754 | 2609 | nop |
4bb3c7a0 | 2610 | 91: |
93d17397 PM |
2611 | #endif |
2612 | ||
fd6d53b1 PM |
2613 | /* |
2614 | * Set DEC to the smaller of DEC and HDEC, so that we wake | |
2615 | * no later than the end of our timeslice (HDEC interrupts | |
2616 | * don't wake us from nap). | |
2617 | */ | |
2618 | mfspr r3, SPRN_DEC | |
2619 | mfspr r4, SPRN_HDEC | |
2620 | mftb r5 | |
1bc3fe81 PM |
2621 | BEGIN_FTR_SECTION |
2622 | /* On P9 check whether the guest has large decrementer mode enabled */ | |
2623 | ld r6, HSTATE_KVM_VCORE(r13) | |
2624 | ld r6, VCORE_LPCR(r6) | |
2625 | andis. r6, r6, LPCR_LD@h | |
2626 | bne 68f | |
2627 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
2f272463 | 2628 | extsw r3, r3 |
1bc3fe81 | 2629 | 68: EXTEND_HDEC(r4) |
2f272463 | 2630 | cmpd r3, r4 |
fd6d53b1 PM |
2631 | ble 67f |
2632 | mtspr SPRN_DEC, r4 | |
2633 | 67: | |
2634 | /* save expiry time of guest decrementer */ | |
fd6d53b1 PM |
2635 | add r3, r3, r5 |
2636 | ld r4, HSTATE_KVM_VCPU(r13) | |
2637 | ld r5, HSTATE_KVM_VCORE(r13) | |
57b8daa7 | 2638 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
fd6d53b1 PM |
2639 | subf r3, r6, r3 /* convert to host TB value */ |
2640 | std r3, VCPU_DEC_EXPIRES(r4) | |
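/*
 * Sketch of the sequence above (large-decrementer handling aside):
 *
 *	if (dec > hdec)
 *		mtspr(SPRN_DEC, hdec);	// wake by end of timeslice
 *	// expiry of the guest's own decrementer, in host TB units:
 *	vcpu->arch.dec_expires = dec + tb_now - vc->tb_offset_applied;
 */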
2641 | ||
b6c295df PM |
2642 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
2643 | ld r4, HSTATE_KVM_VCPU(r13) | |
2644 | addi r3, r4, VCPU_TB_CEDE | |
2645 | bl kvmhv_accumulate_time | |
2646 | #endif | |
2647 | ||
ccc07772 PM |
2648 | lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */ |
2649 | ||
10d91611 NP |
2650 | /* Go back to host stack */ |
2651 | ld r1, HSTATE_HOST_R1(r13) | |
2652 | ||
19ccb76a | 2653 | /* |
aa31e843 | 2654 | * Take a nap until a decrementer, external or doorbell interrupt |
ccc07772 | 2655 | * occurs, with PECE1 and PECE0 set in LPCR. |
66feed61 | 2656 | * On POWER8, set PECEDH, and if we are ceding, also set PECEDP. |
ccc07772 | 2657 | * Also clear the runlatch bit before napping. |
19ccb76a | 2658 | */ |
56548fc0 | 2659 | kvm_do_nap: |
1f09c3ed PM |
2660 | mfspr r0, SPRN_CTRLF |
2661 | clrrdi r0, r0, 1 | |
2662 | mtspr SPRN_CTRLT, r0 | |
582b910e | 2663 | |
f0888f70 PM |
2664 | li r0,1 |
2665 | stb r0,HSTATE_HWTHREAD_REQ(r13) | |
19ccb76a PM |
2666 | mfspr r5,SPRN_LPCR |
2667 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 | |
aa31e843 | 2668 | BEGIN_FTR_SECTION |
66feed61 | 2669 | ori r5, r5, LPCR_PECEDH |
ccc07772 | 2670 | rlwimi r5, r3, 0, LPCR_PECEDP |
aa31e843 | 2671 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
bf53c88e PM |
2672 | |
2673 | kvm_nap_sequence: /* desired LPCR value in r5 */ | |
2674 | BEGIN_FTR_SECTION | |
2675 | /* | |
2676 | * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset) | |
2677 | * enable state loss = 1 (allow SMT mode switch) | |
2678 | * requested level = 0 (just stop dispatching) | |
2679 | */ | |
2680 | lis r3, (PSSCR_EC | PSSCR_ESL)@h | |
bf53c88e PM |
2681 | /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */ |
2682 | li r4, LPCR_PECE_HVEE@higher | |
2683 | sldi r4, r4, 32 | |
2684 | or r5, r5, r4 | |
10d91611 NP |
2685 | FTR_SECTION_ELSE |
2686 | li r3, PNV_THREAD_NAP | |
2687 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) | |
19ccb76a PM |
2688 | mtspr SPRN_LPCR,r5 |
2689 | isync | |
10d91611 | 2690 | |
bf53c88e | 2691 | BEGIN_FTR_SECTION |
10d91611 | 2692 | bl isa300_idle_stop_mayloss |
bf53c88e | 2693 | FTR_SECTION_ELSE |
10d91611 NP |
2694 | bl isa206_idle_insn_mayloss |
2695 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) | |
2696 | ||
2697 | mfspr r0, SPRN_CTRLF | |
2698 | ori r0, r0, 1 | |
2699 | mtspr SPRN_CTRLT, r0 | |
2700 | ||
2701 | mtspr SPRN_SRR1, r3 | |
2702 | ||
2703 | li r0, 0 | |
2704 | stb r0, PACA_FTRACE_ENABLED(r13) | |
2705 | ||
2706 | li r0, KVM_HWTHREAD_IN_KVM | |
2707 | stb r0, HSTATE_HWTHREAD_STATE(r13) | |
2708 | ||
2709 | lbz r0, HSTATE_NAPPING(r13) | |
2710 | cmpwi r0, NAPPING_CEDE | |
2711 | beq kvm_end_cede | |
2712 | cmpwi r0, NAPPING_NOVCPU | |
2713 | beq kvm_novcpu_wakeup | |
2714 | cmpwi r0, NAPPING_UNSPLIT | |
2715 | beq kvm_unsplit_wakeup | |
2716 | twi 31,0,0 /* Nap state must not be zero */ | |
19ccb76a | 2717 | |
e3bbbbfa PM |
2718 | 33: mr r4, r3 |
2719 | li r3, 0 | |
2720 | li r12, 0 | |
2721 | b 34f | |
2722 | ||
19ccb76a | 2723 | kvm_end_cede: |
10d91611 NP |
2724 | /* Woken by external or decrementer interrupt */ |
2725 | ||
4619ac88 PM |
2726 | /* get vcpu pointer */ |
2727 | ld r4, HSTATE_KVM_VCPU(r13) | |
2728 | ||
b6c295df PM |
2729 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
2730 | addi r3, r4, VCPU_TB_RMINTR | |
2731 | bl kvmhv_accumulate_time | |
2732 | #endif | |
2733 | ||
93d17397 | 2734 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
4bb3c7a0 PM |
2735 | /* |
2736 | * Branch around the call if both CPU_FTR_TM and | |
2737 | * CPU_FTR_P9_TM_HV_ASSIST are off. | |
2738 | */ | |
93d17397 | 2739 | BEGIN_FTR_SECTION |
4bb3c7a0 PM |
2740 | b 91f |
2741 | END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0) | |
67f8a8c1 | 2742 | /* |
7854f754 | 2743 | * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR) |
67f8a8c1 | 2744 | */ |
6f597c6b SG |
2745 | mr r3, r4 |
2746 | ld r4, VCPU_MSR(r3) | |
7854f754 | 2747 | li r5, 0 /* don't preserve non-vol regs */ |
7b0e827c | 2748 | bl kvmppc_restore_tm_hv |
7854f754 | 2749 | nop |
6f597c6b | 2750 | ld r4, HSTATE_KVM_VCPU(r13) |
4bb3c7a0 | 2751 | 91: |
93d17397 PM |
2752 | #endif |
2753 | ||
19ccb76a PM |
2754 | /* load up FP state */ |
2755 | bl kvmppc_load_fp | |
2756 | ||
fd6d53b1 PM |
2757 | /* Restore guest decrementer */ |
2758 | ld r3, VCPU_DEC_EXPIRES(r4) | |
2759 | ld r5, HSTATE_KVM_VCORE(r13) | |
57b8daa7 | 2760 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
fd6d53b1 PM |
2761 | add r3, r3, r6 /* convert host TB to guest TB value */ |
2762 | mftb r7 | |
2763 | subf r3, r7, r3 | |
2764 | mtspr SPRN_DEC, r3 | |
2765 | ||
19ccb76a | 2766 | /* Load NV GPRS */ |
c75df6f9 MN |
2767 | ld r14, VCPU_GPR(R14)(r4) |
2768 | ld r15, VCPU_GPR(R15)(r4) | |
2769 | ld r16, VCPU_GPR(R16)(r4) | |
2770 | ld r17, VCPU_GPR(R17)(r4) | |
2771 | ld r18, VCPU_GPR(R18)(r4) | |
2772 | ld r19, VCPU_GPR(R19)(r4) | |
2773 | ld r20, VCPU_GPR(R20)(r4) | |
2774 | ld r21, VCPU_GPR(R21)(r4) | |
2775 | ld r22, VCPU_GPR(R22)(r4) | |
2776 | ld r23, VCPU_GPR(R23)(r4) | |
2777 | ld r24, VCPU_GPR(R24)(r4) | |
2778 | ld r25, VCPU_GPR(R25)(r4) | |
2779 | ld r26, VCPU_GPR(R26)(r4) | |
2780 | ld r27, VCPU_GPR(R27)(r4) | |
2781 | ld r28, VCPU_GPR(R28)(r4) | |
2782 | ld r29, VCPU_GPR(R29)(r4) | |
2783 | ld r30, VCPU_GPR(R30)(r4) | |
2784 | ld r31, VCPU_GPR(R31)(r4) | |
37f55d30 | 2785 | |
e3bbbbfa PM |
2786 | /* Check the wake reason in SRR1 to see why we got here */ |
2787 | bl kvmppc_check_wake_reason | |
19ccb76a | 2788 | |
37f55d30 SW |
2789 | /* |
2790 | * Restore volatile registers since we could have called a | |
2791 | * C routine in kvmppc_check_wake_reason | |
2792 | * r4 = VCPU | |
2793 | * r3 tells us whether we need to return to host or not | |
2794 | * WARNING: r3 gets checked further down; |
2795 | * do not modify it until that check is done. |
2796 | */ | |
2797 | ld r4, HSTATE_KVM_VCPU(r13) | |
2798 | ||
19ccb76a | 2799 | /* clear our bit in vcore->napping_threads */ |
e3bbbbfa PM |
2800 | 34: ld r5,HSTATE_KVM_VCORE(r13) |
2801 | lbz r7,HSTATE_PTID(r13) | |
19ccb76a | 2802 | li r0,1 |
e3bbbbfa | 2803 | sld r0,r0,r7 |
19ccb76a PM |
2804 | addi r6,r5,VCORE_NAPPING_THREADS |
2805 | 32: lwarx r7,0,r6 | |
2806 | andc r7,r7,r0 | |
2807 | stwcx. r7,0,r6 | |
2808 | bne 32b | |
2809 | li r0,0 | |
2810 | stb r0,HSTATE_NAPPING(r13) | |
2811 | ||
37f55d30 | 2812 | /* See if the wake reason saved in r3 means we need to exit */ |
e3bbbbfa | 2813 | stw r12, VCPU_TRAP(r4) |
4619ac88 | 2814 | mr r9, r4 |
e3bbbbfa PM |
2815 | cmpdi r3, 0 |
2816 | bgt guest_exit_cont | |
df709a29 | 2817 | b maybe_reenter_guest |
19ccb76a PM |
2818 | |
2819 | /* handle the case of cede when already prodded */ |
04f995a5 PM |
2820 | kvm_cede_prodded: |
2821 | li r0,0 | |
19ccb76a PM |
2822 | stb r0,VCPU_PRODDED(r3) |
2823 | sync /* order testing prodded vs. clearing ceded */ | |
2824 | stb r0,VCPU_CEDED(r3) | |
2825 | li r3,H_SUCCESS | |
2826 | blr | |
2827 | ||
2828 | /* we've ceded but we want to give control to the host */ | |
04f995a5 | 2829 | kvm_cede_exit: |
6af27c84 | 2830 | ld r9, HSTATE_KVM_VCPU(r13) |
9b9b13a6 BH |
2831 | #ifdef CONFIG_KVM_XICS |
2832 | /* Abort if we still have a pending escalation */ | |
2833 | lbz r5, VCPU_XIVE_ESC_ON(r9) | |
2834 | cmpwi r5, 0 | |
2835 | beq 1f | |
2836 | li r0, 0 | |
2837 | stb r0, VCPU_CEDED(r9) | |
2838 | 1: /* Enable XIVE escalation */ | |
2839 | li r5, XIVE_ESB_SET_PQ_00 | |
2840 | mfmsr r0 | |
2841 | andi. r0, r0, MSR_DR /* in real mode? */ | |
2842 | beq 1f | |
2843 | ld r10, VCPU_XIVE_ESC_VADDR(r9) | |
2844 | cmpdi r10, 0 | |
2845 | beq 3f | |
2846 | ldx r0, r10, r5 | |
2847 | b 2f | |
2848 | 1: ld r10, VCPU_XIVE_ESC_RADDR(r9) | |
2849 | cmpdi r10, 0 | |
2850 | beq 3f | |
2851 | ldcix r0, r10, r5 | |
2852 | 2: sync | |
2853 | li r0, 1 | |
2854 | stb r0, VCPU_XIVE_ESC_ON(r9) | |
2855 | #endif /* CONFIG_KVM_XICS */ | |
2856 | 3: b guest_exit_cont | |
19ccb76a | 2857 | |
884dfb72 | 2858 | /* Try to do machine check recovery in real mode */ |
b4072df4 PM |
2859 | machine_check_realmode: |
2860 | mr r3, r9 /* get vcpu pointer */ | |
b1576fec | 2861 | bl kvmppc_realmode_machine_check |
b4072df4 | 2862 | nop |
884dfb72 | 2863 | /* all machine checks go to virtual mode for further handling */ |
b4072df4 PM |
2864 | ld r9, HSTATE_KVM_VCPU(r13) |
2865 | li r12, BOOK3S_INTERRUPT_MACHINE_CHECK | |
884dfb72 | 2866 | b guest_exit_cont |
b4072df4 | 2867 | |
df709a29 PM |
2868 | /* |
2869 | * Call C code to handle a HMI in real mode. | |
2870 | * Only the primary thread does the call, secondary threads are handled | |
2871 | * by calling hmi_exception_realmode() after kvmppc_hv_entry returns. | |
2872 | * r9 points to the vcpu on entry | |
2873 | */ | |
2874 | hmi_realmode: | |
2875 | lbz r0, HSTATE_PTID(r13) | |
2876 | cmpwi r0, 0 | |
2877 | bne guest_exit_cont | |
2878 | bl kvmppc_realmode_hmi_handler | |
2879 | ld r9, HSTATE_KVM_VCPU(r13) | |
2880 | li r12, BOOK3S_INTERRUPT_HMI | |
2881 | b guest_exit_cont | |
2882 | ||
e3bbbbfa PM |
2883 | /* |
2884 | * Check the reason we woke from nap, and take appropriate action. | |
1f09c3ed | 2885 | * Returns (in r3): |
e3bbbbfa PM |
2886 | * 0 if nothing needs to be done |
2887 | * 1 if something happened that needs to be handled by the host | |
66feed61 | 2888 | * -1 if there was a guest wakeup (IPI or msgsnd) |
e3c13e56 SW |
2889 | * -2 if we handled a PCI passthrough interrupt (returned by |
2890 | * kvmppc_read_intr only) | |
e3bbbbfa PM |
2891 | * |
2892 | * Also sets r12 to the interrupt vector for any interrupt that needs | |
2893 | * to be handled now by the host (0x500 for external interrupt), or zero. | |
37f55d30 SW |
2894 | * Modifies all volatile registers (since it may call a C function). |
2895 | * This routine calls kvmppc_read_intr, a C function, if an external | |
2896 | * interrupt is pending. | |
e3bbbbfa PM |
2897 | */ |
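/*
 * Sketch of the decode below; (srr1 >> 18) & 0xf is the SRR1 wake
 * reason field on P8/P9 (a 3-bit field on P7):
 *
 *	switch ((srr1 >> 18) & 0xf) {
 *	case 8:	 return kvmppc_read_intr();	// external
 *	case 6:	 return 0;			// decrementer
 *	case 5:	 return 0;			// priv doorbell (P8)
 *	case 3:	 trap = BOOK3S_INTERRUPT_H_DOORBELL;
 *		 return host_ipi ? 1 : -1;	// hv doorbell
 *	case 10: trap = BOOK3S_INTERRUPT_HMI;
 *		 return 1;			// hypervisor maint.
 *	default: return 1;
 *	}
 */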
2898 | kvmppc_check_wake_reason: | |
2899 | mfspr r6, SPRN_SRR1 | |
aa31e843 PM |
2900 | BEGIN_FTR_SECTION |
2901 | rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ | |
2902 | FTR_SECTION_ELSE | |
2903 | rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ | |
2904 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) | |
2905 | cmpwi r6, 8 /* was it an external interrupt? */ | |
37f55d30 | 2906 | beq 7f /* if so, see what it was */ |
e3bbbbfa PM |
2907 | li r3, 0 |
2908 | li r12, 0 | |
2909 | cmpwi r6, 6 /* was it the decrementer? */ | |
2910 | beq 0f | |
aa31e843 PM |
2911 | BEGIN_FTR_SECTION |
2912 | cmpwi r6, 5 /* privileged doorbell? */ | |
2913 | beq 0f | |
5d00f66b PM |
2914 | cmpwi r6, 3 /* hypervisor doorbell? */ |
2915 | beq 3f | |
aa31e843 | 2916 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
fd7bacbc MS |
2917 | cmpwi r6, 0xa /* Hypervisor maintenance? */ |
2918 | beq 4f | |
e3bbbbfa PM |
2919 | li r3, 1 /* anything else, return 1 */ |
2920 | 0: blr | |
2921 | ||
5d00f66b PM |
2922 | /* hypervisor doorbell */ |
2923 | 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL | |
70aa3961 GS |
2924 | |
2925 | /* | |
2926 | * Clear the doorbell as we will invoke the handler | |
2927 | * explicitly in the guest exit path. | |
2928 | */ | |
2929 | lis r6, (PPC_DBELL_SERVER << (63-36))@h | |
2930 | PPC_MSGCLR(6) | |
66feed61 | 2931 | /* see if it's a host IPI */ |
5d00f66b | 2932 | li r3, 1 |
2cde3716 NP |
2933 | BEGIN_FTR_SECTION |
2934 | PPC_MSGSYNC | |
2935 | lwsync | |
2936 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |
66feed61 PM |
2937 | lbz r0, HSTATE_HOST_IPI(r13) |
2938 | cmpwi r0, 0 | |
2939 | bnelr | |
70aa3961 | 2940 | /* if not, return -1 */ |
66feed61 | 2941 | li r3, -1 |
5d00f66b PM |
2942 | blr |
2943 | ||
fd7bacbc MS |
2944 | /* Woken up due to Hypervisor maintenance interrupt */ |
2945 | 4: li r12, BOOK3S_INTERRUPT_HMI | |
2946 | li r3, 1 | |
2947 | blr | |
2948 | ||
37f55d30 SW |
2949 | /* external interrupt - create a stack frame so we can call C */ |
2950 | 7: mflr r0 | |
2951 | std r0, PPC_LR_STKOFF(r1) | |
2952 | stdu r1, -PPC_MIN_STKFRM(r1) | |
2953 | bl kvmppc_read_intr | |
2954 | nop | |
2955 | li r12, BOOK3S_INTERRUPT_EXTERNAL | |
f7af5209 SW |
2956 | cmpdi r3, 1 |
2957 | ble 1f | |
2958 | ||
2959 | /* | |
2960 | * Return code of 2 means PCI passthrough interrupt, but | |
2961 | * we need to return to the host to complete handling the |
2962 | * interrupt. The trap reason is expected in r12 by the guest |
2963 | * exit code. | |
2964 | */ | |
2965 | li r12, BOOK3S_INTERRUPT_HV_RM_HARD | |
2966 | 1: | |
37f55d30 SW |
2967 | ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) |
2968 | addi r1, r1, PPC_MIN_STKFRM | |
2969 | mtlr r0 | |
2970 | blr | |
371fefd6 | 2971 | |
de56a948 PM |
2972 | /* |
2973 | * Save away FP, VMX and VSX registers. | |
2974 | * r3 = vcpu pointer | |
595e4f7e PM |
2975 | * N.B. r30 and r31 are volatile across this function, |
2976 | * thus it is not callable from C. | |
a8606e20 | 2977 | */ |
595e4f7e PM |
2978 | kvmppc_save_fp: |
2979 | mflr r30 | |
2980 | mr r31,r3 | |
8943633c PM |
2981 | mfmsr r5 |
2982 | ori r8,r5,MSR_FP | |
de56a948 PM |
2983 | #ifdef CONFIG_ALTIVEC |
2984 | BEGIN_FTR_SECTION | |
2985 | oris r8,r8,MSR_VEC@h | |
2986 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
2987 | #endif | |
2988 | #ifdef CONFIG_VSX | |
2989 | BEGIN_FTR_SECTION | |
2990 | oris r8,r8,MSR_VSX@h | |
2991 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
2992 | #endif | |
2993 | mtmsrd r8 | |
595e4f7e | 2994 | addi r3,r3,VCPU_FPRS |
9bf163f8 | 2995 | bl store_fp_state |
de56a948 PM |
2996 | #ifdef CONFIG_ALTIVEC |
2997 | BEGIN_FTR_SECTION | |
595e4f7e | 2998 | addi r3,r31,VCPU_VRS |
9bf163f8 | 2999 | bl store_vr_state |
de56a948 PM |
3000 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
3001 | #endif | |
3002 | mfspr r6,SPRN_VRSAVE | |
e724f080 | 3003 | stw r6,VCPU_VRSAVE(r31) |
595e4f7e | 3004 | mtlr r30 |
de56a948 PM |
3005 | blr |
3006 | ||
3007 | /* | |
3008 | * Load up FP, VMX and VSX registers | |
3009 | * r4 = vcpu pointer | |
595e4f7e PM |
3010 | * N.B. r30 and r31 are volatile across this function, |
3011 | * thus it is not callable from C. | |
de56a948 | 3012 | */ |
de56a948 | 3013 | kvmppc_load_fp: |
595e4f7e PM |
3014 | mflr r30 |
3015 | mr r31,r4 | |
de56a948 PM |
3016 | mfmsr r9 |
3017 | ori r8,r9,MSR_FP | |
3018 | #ifdef CONFIG_ALTIVEC | |
3019 | BEGIN_FTR_SECTION | |
3020 | oris r8,r8,MSR_VEC@h | |
3021 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
3022 | #endif | |
3023 | #ifdef CONFIG_VSX | |
3024 | BEGIN_FTR_SECTION | |
3025 | oris r8,r8,MSR_VSX@h | |
3026 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
3027 | #endif | |
3028 | mtmsrd r8 | |
595e4f7e | 3029 | addi r3,r4,VCPU_FPRS |
9bf163f8 | 3030 | bl load_fp_state |
de56a948 PM |
3031 | #ifdef CONFIG_ALTIVEC |
3032 | BEGIN_FTR_SECTION | |
595e4f7e | 3033 | addi r3,r31,VCPU_VRS |
9bf163f8 | 3034 | bl load_vr_state |
de56a948 PM |
3035 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
3036 | #endif | |
e724f080 | 3037 | lwz r7,VCPU_VRSAVE(r31) |
de56a948 | 3038 | mtspr SPRN_VRSAVE,r7 |
595e4f7e PM |
3039 | mtlr r30 |
3040 | mr r4,r31 | |
de56a948 | 3041 | blr |
44a3add8 | 3042 | |
f024ee09 PM |
3043 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
3044 | /* | |
3045 | * Save transactional state and TM-related registers. | |
6f597c6b SG |
3046 | * Called with r3 pointing to the vcpu struct and r4 containing |
3047 | * the guest MSR value. | |
7854f754 PM |
3048 | * r5 is non-zero iff non-volatile register state needs to be maintained. |
3049 | * If r5 == 0, this can modify all checkpointed registers, but | |
6f597c6b | 3050 | * restores r1 and r2 before exit. |
f024ee09 | 3051 | */ |
7854f754 PM |
3052 | _GLOBAL_TOC(kvmppc_save_tm_hv) |
3053 | EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv) | |
7b0e827c PM |
3054 | /* See if we need to handle fake suspend mode */ |
3055 | BEGIN_FTR_SECTION | |
caa3be92 | 3056 | b __kvmppc_save_tm |
7b0e827c PM |
3057 | END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) |
3058 | ||
3059 | lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */ | |
3060 | cmpwi r0, 0 | |
caa3be92 | 3061 | beq __kvmppc_save_tm |
7b0e827c PM |
3062 | |
3063 | /* The following code handles the fake_suspend = 1 case */ | |
f024ee09 PM |
3064 | mflr r0 |
3065 | std r0, PPC_LR_STKOFF(r1) | |
87a11bb6 | 3066 | stdu r1, -PPC_MIN_STKFRM(r1) |
f024ee09 PM |
3067 | |
3068 | /* Turn on TM. */ | |
3069 | mfmsr r8 | |
3070 | li r0, 1 | |
3071 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | |
3072 | mtmsrd r8 | |
3073 | ||
87a11bb6 SJS |
3074 | rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */ |
3075 | beq 4f | |
7b0e827c | 3076 | BEGIN_FTR_SECTION |
87a11bb6 | 3077 | bl pnv_power9_force_smt4_catch |
7b0e827c | 3078 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) |
87a11bb6 | 3079 | nop |
4bb3c7a0 | 3080 | |
7b0e827c PM |
3081 | /* We have to treclaim here because that's the only way to do S->N (suspended -> non-transactional) */
3082 | li r3, TM_CAUSE_KVM_RESCHED | |
f024ee09 PM |
3083 | TRECLAIM(R3) |
3084 | ||
4bb3c7a0 PM |
3085 | /* |
3086 | * We were in fake suspend, so we are not going to save the | |
3087 | * register state as the guest checkpointed state (since | |
3088 | * we already have it); therefore we can now use any volatile GPR.
7854f754 PM |
3089 | * In fact treclaim in fake suspend state doesn't modify |
3090 | * any registers. | |
4bb3c7a0 | 3091 | */ |
7b0e827c | 3092 | |
7854f754 | 3093 | BEGIN_FTR_SECTION |
87a11bb6 | 3094 | bl pnv_power9_force_smt4_release |
7854f754 | 3095 | END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG) |
87a11bb6 SJS |
3096 | nop |
3097 | ||
3098 | 4: | |
4bb3c7a0 PM |
3099 | mfspr r3, SPRN_PSSCR |
3100 | /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */ | |
3101 | li r0, PSSCR_FAKE_SUSPEND | |
3102 | andc r3, r3, r0 | |
3103 | mtspr SPRN_PSSCR, r3 | |
4bb3c7a0 | 3104 | |
681c617b | 3105 | /* Don't save TEXASR, use value from last exit in real suspend state */ |
f024ee09 | 3106 | ld r9, HSTATE_KVM_VCPU(r13) |
f024ee09 PM |
3107 | mfspr r5, SPRN_TFHAR |
3108 | mfspr r6, SPRN_TFIAR | |
f024ee09 PM |
3109 | std r5, VCPU_TFHAR(r9) |
3110 | std r6, VCPU_TFIAR(r9) | |
f024ee09 | 3111 | |
87a11bb6 | 3112 | addi r1, r1, PPC_MIN_STKFRM |
f024ee09 PM |
3113 | ld r0, PPC_LR_STKOFF(r1) |
3114 | mtlr r0 | |
3115 | blr | |
3116 | ||
3117 | /* | |
3118 | * Restore transactional state and TM-related registers. | |
6f597c6b SG |
3119 | * Called with r3 pointing to the vcpu struct |
3120 | * and r4 containing the guest MSR value. | |
7854f754 | 3121 | * r5 is non-zero iff non-volatile register state needs to be maintained. |
f024ee09 | 3122 | * This potentially modifies all checkpointed registers. |
6f597c6b | 3123 | * It restores r1 and r2 from the PACA. |
f024ee09 | 3124 | */ |
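/*
 * From C this corresponds roughly to (prototype assumed, not taken
 * from this file):
 *	void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 guest_msr,
 *				  bool preserve_nv);
 */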
7854f754 PM |
3125 | _GLOBAL_TOC(kvmppc_restore_tm_hv) |
3126 | EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv) | |
7b0e827c PM |
3127 | /* |
3128 | * If we are doing TM emulation for the guest on a POWER9 DD2, | |
3129 | * then we don't actually do a trechkpt -- we either set up | |
3130 | * fake-suspend mode, or emulate a TM rollback. | |
3131 | */ | |
3132 | BEGIN_FTR_SECTION | |
caa3be92 | 3133 | b __kvmppc_restore_tm |
7b0e827c | 3134 | END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST) |
f024ee09 PM |
3135 | mflr r0 |
3136 | std r0, PPC_LR_STKOFF(r1) | |
3137 | ||
7b0e827c PM |
3138 | li r0, 0 |
3139 | stb r0, HSTATE_FAKE_SUSPEND(r13) | |
3140 | ||
3141 | /* Turn on TM so we can restore TM SPRs */ | |
f024ee09 | 3142 | mfmsr r5 |
7b0e827c PM |
3143 | li r0, 1 |
3144 | rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG | |
f024ee09 PM |
3145 | mtmsrd r5 |
3146 | ||
3147 | /* | |
3148 | * The user may change these outside of a transaction, so they must | |
3149 | * always be context switched. | |
3150 | */ | |
6f597c6b SG |
3151 | ld r5, VCPU_TFHAR(r3) |
3152 | ld r6, VCPU_TFIAR(r3) | |
3153 | ld r7, VCPU_TEXASR(r3) | |
f024ee09 PM |
3154 | mtspr SPRN_TFHAR, r5 |
3155 | mtspr SPRN_TFIAR, r6 | |
3156 | mtspr SPRN_TEXASR, r7 | |
3157 | ||
6f597c6b | 3158 | rldicl. r5, r4, 64 - MSR_TS_S_LG, 62 |
f024ee09 | 3159 | beqlr /* TM not active in guest */ |
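/*
 * MSR[TS], extracted into r5 above, is a 2-bit field:
 * 0 = non-transactional (already returned via the beqlr),
 * 1 = suspended, 2 = transactional.
 */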
f024ee09 | 3160 | |
7b0e827c | 3161 | /* Make sure the failure summary is set */ |
f024ee09 PM |
3162 | oris r7, r7, (TEXASR_FS)@h |
3163 | mtspr SPRN_TEXASR, r7 | |
3164 | ||
4bb3c7a0 PM |
3165 | cmpwi r5, 1 /* check for suspended state */ |
3166 | bgt 10f | |
3167 | stb r5, HSTATE_FAKE_SUSPEND(r13) | |
7b0e827c | 3168 | b 9f /* and return */ |
4bb3c7a0 PM |
3169 | 10: stdu r1, -PPC_MIN_STKFRM(r1) |
3170 | /* guest is in transactional state, so simulate rollback */ | |
4bb3c7a0 PM |
3171 | bl kvmhv_emulate_tm_rollback |
3172 | nop | |
4bb3c7a0 | 3173 | addi r1, r1, PPC_MIN_STKFRM |
7b0e827c PM |
3174 | 9: ld r0, PPC_LR_STKOFF(r1) |
3175 | mtlr r0 | |
3176 | blr | |
7b0e827c | 3177 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
f024ee09 | 3178 | |
44a3add8 PM |
3179 | /* |
3180 | * We come here if we get any exception or interrupt while we are | |
3181 | * executing host real mode code while in guest MMU context. | |
857b99e1 PM |
3182 | * r12 is (CR << 32) | vector |
3183 | * r13 points to our PACA | |
3184 | * r12 is saved in HSTATE_SCRATCH0(r13) | |
3185 | * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE | |
3186 | * r9 is saved in HSTATE_SCRATCH2(r13) | |
3187 | * r13 is saved in HSPRG1 | |
3188 | * cfar is saved in HSTATE_CFAR(r13) | |
3189 | * ppr is saved in HSTATE_PPR(r13) | |
44a3add8 PM |
3190 | */ |
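/*
 * The code below builds a pt_regs-style frame on the emergency stack
 * so the full register state is in memory, then hands it to C code
 * (r3 = r1 + STACK_FRAME_OVERHEAD at the call site at the end).
 */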
3191 | kvmppc_bad_host_intr: | |
857b99e1 PM |
3192 | /* |
3193 | * Switch to the emergency stack, but start half-way down in | |
3194 | * case we were already on it. | |
3195 | */ | |
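/*
 * (The emergency stack is THREAD_SIZE bytes, so dropping
 * THREAD_SIZE/2 + INT_FRAME_SIZE below its top avoids clobbering
 * any frames in the top half if we were already using it.)
 */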
3196 | mr r9, r1 | |
3197 | std r1, PACAR1(r13) | |
3198 | ld r1, PACAEMERGSP(r13) | |
3199 | subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE | |
3200 | std r9, 0(r1) | |
3201 | std r0, GPR0(r1) | |
3202 | std r9, GPR1(r1) | |
3203 | std r2, GPR2(r1) | |
3204 | SAVE_4GPRS(3, r1) | |
3205 | SAVE_2GPRS(7, r1) | |
3206 | srdi r0, r12, 32 | |
3207 | clrldi r12, r12, 32 | |
3208 | std r0, _CCR(r1) | |
3209 | std r12, _TRAP(r1) | |
3210 | andi. r0, r12, 2 | |
3211 | beq 1f | |
3212 | mfspr r3, SPRN_HSRR0 | |
3213 | mfspr r4, SPRN_HSRR1 | |
3214 | mfspr r5, SPRN_HDAR | |
3215 | mfspr r6, SPRN_HDSISR | |
3216 | b 2f | |
3217 | 1: mfspr r3, SPRN_SRR0 | |
3218 | mfspr r4, SPRN_SRR1 | |
3219 | mfspr r5, SPRN_DAR | |
3220 | mfspr r6, SPRN_DSISR | |
3221 | 2: std r3, _NIP(r1) | |
3222 | std r4, _MSR(r1) | |
3223 | std r5, _DAR(r1) | |
3224 | std r6, _DSISR(r1) | |
3225 | ld r9, HSTATE_SCRATCH2(r13) | |
3226 | ld r12, HSTATE_SCRATCH0(r13) | |
3227 | GET_SCRATCH0(r0) | |
3228 | SAVE_4GPRS(9, r1) | |
3229 | std r0, GPR13(r1) | |
3230 | SAVE_NVGPRS(r1) | |
3231 | ld r5, HSTATE_CFAR(r13) | |
3232 | std r5, ORIG_GPR3(r1) | |
3233 | mflr r3 | |
3234 | #ifdef CONFIG_RELOCATABLE | |
3235 | ld r4, HSTATE_SCRATCH1(r13) | |
3236 | #else | |
3237 | mfctr r4 | |
3238 | #endif | |
3239 | mfxer r5 | |
4e26bc4a | 3240 | lbz r6, PACAIRQSOFTMASK(r13) |
857b99e1 PM |
3241 | std r3, _LINK(r1) |
3242 | std r4, _CTR(r1) | |
3243 | std r5, _XER(r1) | |
3244 | std r6, SOFTE(r1) | |
3245 | ld r2, PACATOC(r13) | |
3246 | LOAD_REG_IMMEDIATE(3, 0x7265677368657265) | |
3247 | std r3, STACK_FRAME_OVERHEAD-16(r1) | |
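/* 0x7265677368657265 is ASCII "regshere", the usual powerpc marker
 * that tags a stack frame as containing a register save area. */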
3248 | ||
3249 | /* | |
3250 | * On POWER9 do a minimal restore of the MMU and call C code, | |
3251 | * which will print a message and panic. | |
3252 | * XXX On POWER7 and POWER8, we just spin here since we don't | |
3253 | * know what the other threads are doing (and we don't want to | |
3254 | * coordinate with them) - but at least we now have register state | |
3255 | * in memory that we might be able to look at from another CPU. | |
3256 | */ | |
3257 | BEGIN_FTR_SECTION | |
44a3add8 | 3258 | b . |
857b99e1 PM |
3259 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) |
3260 | ld r9, HSTATE_KVM_VCPU(r13) | |
3261 | ld r10, VCPU_KVM(r9) | |
3262 | ||
3263 | li r0, 0 | |
3264 | mtspr SPRN_AMR, r0 | |
3265 | mtspr SPRN_IAMR, r0 | |
3266 | mtspr SPRN_CIABR, r0 | |
3267 | mtspr SPRN_DAWRX, r0 | |
3268 | ||
857b99e1 PM |
3269 | BEGIN_MMU_FTR_SECTION |
3270 | b 4f | |
3271 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |
3272 | ||
3273 | slbmte r0, r0 | |
3274 | slbia | |
3275 | ptesync | |
3276 | ld r8, PACA_SLBSHADOWPTR(r13) | |
3277 | .rept SLB_NUM_BOLTED | |
3278 | li r3, SLBSHADOW_SAVEAREA | |
3279 | LDX_BE r5, r8, r3 | |
3280 | addi r3, r3, 8 | |
3281 | LDX_BE r6, r8, r3 | |
3282 | andis. r7, r5, SLB_ESID_V@h | |
3283 | beq 3f | |
3284 | slbmte r6, r5 | |
3285 | 3: addi r8, r8, 16 | |
3286 | .endr | |
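/*
 * The sequence above flushed the whole SLB (slbmte of zeroes clears
 * entry 0, which slbia may leave alone) and then reloaded each valid
 * bolted entry from the SLB shadow area; LDX_BE is used because the
 * shadow buffer is kept big-endian.
 */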
3287 | ||
3288 | 4: lwz r7, KVM_HOST_LPID(r10) | |
3289 | mtspr SPRN_LPID, r7 | |
3290 | mtspr SPRN_PID, r0 | |
3291 | ld r8, KVM_HOST_LPCR(r10) | |
3292 | mtspr SPRN_LPCR, r8 | |
3293 | isync | |
3294 | li r0, KVM_GUEST_MODE_NONE | |
3295 | stb r0, HSTATE_IN_GUEST(r13) | |
3296 | ||
3297 | /* | |
3298 | * Turn on the MMU and jump to C code | |
3299 | */ | |
3300 | bcl 20, 31, .+4 | |
3301 | 5: mflr r3 | |
3302 | addi r3, r3, 9f - 5b | |
eadce3b4 NP |
3303 | li r4, -1 |
3304 | rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */ | |
857b99e1 PM |
3305 | ld r4, PACAKMSR(r13) |
3306 | mtspr SPRN_SRR0, r3 | |
3307 | mtspr SPRN_SRR1, r4 | |
222f20f1 | 3308 | RFI_TO_KERNEL |
857b99e1 PM |
3309 | 9: addi r3, r1, STACK_FRAME_OVERHEAD |
3310 | bl kvmppc_bad_interrupt | |
3311 | b 9b | |
e4e38121 MN |
3312 | |
3313 | /* | |
3314 | * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken | |
3315 | * from VCPU_INTR_MSR and is modified based on the required TM state changes. | |
3316 | * r11 has the guest MSR value (in/out) | |
3317 | * r9 has a vcpu pointer (in) | |
3318 | * r0 is used as a scratch register | |
3319 | */ | |
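/*
 * Rough C sketch of the bit manipulation below (illustrative only;
 * MSR_TS_MASK is assumed to name the two MSR[TS] bits):
 *
 *	ts = (guest_msr >> MSR_TS_S_LG) & 3;	// 0 none, 1 susp, 2 trans
 *	if (ts == 2)
 *		ts = 1;		// transactional -> suspended on interrupt
 *	new_msr = vcpu->arch.intr_msr;
 *	new_msr = (new_msr & ~MSR_TS_MASK) | ((u64)ts << MSR_TS_S_LG);
 */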
3320 | kvmppc_msr_interrupt: | |
3321 | rldicl r0, r11, 64 - MSR_TS_S_LG, 62 | |
3322 | cmpwi r0, 2 /* Check if we are in transactional state.. */ | |
3323 | ld r11, VCPU_INTR_MSR(r9) | |
3324 | bne 1f | |
3325 | /* ... if transactional, change to suspended */ | |
3326 | li r0, 1 | |
3327 | 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG | |
3328 | blr | |
9bc01a9b | 3329 | |
41f4e631 PM |
3330 | /* |
3331 | * Load up guest PMU state. R3 points to the vcpu struct. | |
3332 | */ | |
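/* A rough C-level view (prototype assumed, not taken from this file):
 *	void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
 */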
3333 | _GLOBAL(kvmhv_load_guest_pmu) | |
3334 | EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu) | |
3335 | mr r4, r3 | |
3336 | mflr r0 | |
3337 | li r3, 1 | |
3338 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | |
3339 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | |
3340 | isync | |
3341 | BEGIN_FTR_SECTION | |
3342 | ld r3, VCPU_MMCR(r4) | |
3343 | andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO | |
3344 | cmpwi r5, MMCR0_PMAO | |
3345 | beql kvmppc_fix_pmao | |
3346 | END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) | |
3347 | lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ | |
3348 | lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ | |
3349 | lwz r6, VCPU_PMC + 8(r4) | |
3350 | lwz r7, VCPU_PMC + 12(r4) | |
3351 | lwz r8, VCPU_PMC + 16(r4) | |
3352 | lwz r9, VCPU_PMC + 20(r4) | |
3353 | mtspr SPRN_PMC1, r3 | |
3354 | mtspr SPRN_PMC2, r5 | |
3355 | mtspr SPRN_PMC3, r6 | |
3356 | mtspr SPRN_PMC4, r7 | |
3357 | mtspr SPRN_PMC5, r8 | |
3358 | mtspr SPRN_PMC6, r9 | |
3359 | ld r3, VCPU_MMCR(r4) | |
3360 | ld r5, VCPU_MMCR + 8(r4) | |
3361 | ld r6, VCPU_MMCR + 16(r4) | |
3362 | ld r7, VCPU_SIAR(r4) | |
3363 | ld r8, VCPU_SDAR(r4) | |
3364 | mtspr SPRN_MMCR1, r5 | |
3365 | mtspr SPRN_MMCRA, r6 | |
3366 | mtspr SPRN_SIAR, r7 | |
3367 | mtspr SPRN_SDAR, r8 | |
3368 | BEGIN_FTR_SECTION | |
3369 | ld r5, VCPU_MMCR + 24(r4) | |
3370 | ld r6, VCPU_SIER(r4) | |
3371 | mtspr SPRN_MMCR2, r5 | |
3372 | mtspr SPRN_SIER, r6 | |
3373 | BEGIN_FTR_SECTION_NESTED(96) | |
3374 | lwz r7, VCPU_PMC + 24(r4) | |
3375 | lwz r8, VCPU_PMC + 28(r4) | |
3376 | ld r9, VCPU_MMCR + 32(r4) | |
3377 | mtspr SPRN_SPMC1, r7 | |
3378 | mtspr SPRN_SPMC2, r8 | |
3379 | mtspr SPRN_MMCRS, r9 | |
3380 | END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) | |
3381 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
3382 | mtspr SPRN_MMCR0, r3 | |
3383 | isync | |
3384 | mtlr r0 | |
3385 | blr | |
3386 | ||
3387 | /* | |
3388 | * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu. | |
3389 | */ | |
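/* No arguments: the saved state lives in the PACA, so the assumed
 * C prototype is simply void kvmhv_load_host_pmu(void). */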
3390 | _GLOBAL(kvmhv_load_host_pmu) | |
3391 | EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu) | |
3392 | mflr r0 | |
3393 | lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */ | |
3394 | cmpwi r4, 0 | |
3395 | beq 23f /* skip if not */ | |
3396 | BEGIN_FTR_SECTION | |
3397 | ld r3, HSTATE_MMCR0(r13) | |
3398 | andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO | |
3399 | cmpwi r4, MMCR0_PMAO | |
3400 | beql kvmppc_fix_pmao | |
3401 | END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) | |
3402 | lwz r3, HSTATE_PMC1(r13) | |
3403 | lwz r4, HSTATE_PMC2(r13) | |
3404 | lwz r5, HSTATE_PMC3(r13) | |
3405 | lwz r6, HSTATE_PMC4(r13) | |
3406 | lwz r8, HSTATE_PMC5(r13) | |
3407 | lwz r9, HSTATE_PMC6(r13) | |
3408 | mtspr SPRN_PMC1, r3 | |
3409 | mtspr SPRN_PMC2, r4 | |
3410 | mtspr SPRN_PMC3, r5 | |
3411 | mtspr SPRN_PMC4, r6 | |
3412 | mtspr SPRN_PMC5, r8 | |
3413 | mtspr SPRN_PMC6, r9 | |
3414 | ld r3, HSTATE_MMCR0(r13) | |
3415 | ld r4, HSTATE_MMCR1(r13) | |
3416 | ld r5, HSTATE_MMCRA(r13) | |
3417 | ld r6, HSTATE_SIAR(r13) | |
3418 | ld r7, HSTATE_SDAR(r13) | |
3419 | mtspr SPRN_MMCR1, r4 | |
3420 | mtspr SPRN_MMCRA, r5 | |
3421 | mtspr SPRN_SIAR, r6 | |
3422 | mtspr SPRN_SDAR, r7 | |
3423 | BEGIN_FTR_SECTION | |
3424 | ld r8, HSTATE_MMCR2(r13) | |
3425 | ld r9, HSTATE_SIER(r13) | |
3426 | mtspr SPRN_MMCR2, r8 | |
3427 | mtspr SPRN_SIER, r9 | |
3428 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
3429 | mtspr SPRN_MMCR0, r3 | |
3430 | isync | |
3431 | mtlr r0 | |
3432 | 23: blr | |
3433 | ||
3434 | /* | |
3435 | * Save guest PMU state into the vcpu struct. | |
3436 | * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA) | |
3437 | */ | |
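/* A rough C-level view (prototype assumed, not taken from this file):
 *	void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
 */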
3438 | _GLOBAL(kvmhv_save_guest_pmu) | |
3439 | EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu) | |
3440 | mr r9, r3 | |
3441 | mr r8, r4 | |
3442 | BEGIN_FTR_SECTION | |
3443 | /* | |
3444 | * POWER8 seems to have a hardware bug where setting | |
3445 | * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] | |
3446 | * when some counters are already negative doesn't seem | |
3447 | * to cause a performance monitor alert (and hence interrupt). | |
3448 | * The effect of this is that when saving the PMU state, | |
3449 | * if there is no PMU alert pending when we read MMCR0 | |
3450 | * before freezing the counters, but one becomes pending | |
3451 | * before we read the counters, we lose it. | |
3452 | * To work around this, we need a way to freeze the counters | |
3453 | * before reading MMCR0. Normally, freezing the counters | |
3454 | * is done by writing MMCR0 (to set MMCR0[FC]) which | |
3455 | * unavoidably writes MMCR0[PMAO] as well. On POWER8,
3456 | * we can also freeze the counters using MMCR2, by writing | |
3457 | * 1s to all the counter freeze condition bits (there are | |
3458 | * 9 bits each for 6 counters). | |
3459 | */ | |
3460 | li r3, -1 /* set all freeze bits */ | |
3461 | clrrdi r3, r3, 10 | |
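/* r3 is now 0xfffffffffffffc00: ones in the top 54 bits, i.e. the
 * 9 freeze condition bits for each of the 6 counters. */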
3462 | mfspr r10, SPRN_MMCR2 | |
3463 | mtspr SPRN_MMCR2, r3 | |
3464 | isync | |
3465 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
3466 | li r3, 1 | |
3467 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | |
3468 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ | |
3469 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | |
3470 | mfspr r6, SPRN_MMCRA | |
3471 | /* Clear MMCRA in order to disable SDAR updates */ | |
3472 | li r7, 0 | |
3473 | mtspr SPRN_MMCRA, r7 | |
3474 | isync | |
3475 | cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */ | |
3476 | bne 21f | |
3477 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ | |
3478 | b 22f | |
3479 | 21: mfspr r5, SPRN_MMCR1 | |
3480 | mfspr r7, SPRN_SIAR | |
3481 | mfspr r8, SPRN_SDAR | |
3482 | std r4, VCPU_MMCR(r9) | |
3483 | std r5, VCPU_MMCR + 8(r9) | |
3484 | std r6, VCPU_MMCR + 16(r9) | |
3485 | BEGIN_FTR_SECTION | |
3486 | std r10, VCPU_MMCR + 24(r9) | |
3487 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
3488 | std r7, VCPU_SIAR(r9) | |
3489 | std r8, VCPU_SDAR(r9) | |
3490 | mfspr r3, SPRN_PMC1 | |
3491 | mfspr r4, SPRN_PMC2 | |
3492 | mfspr r5, SPRN_PMC3 | |
3493 | mfspr r6, SPRN_PMC4 | |
3494 | mfspr r7, SPRN_PMC5 | |
3495 | mfspr r8, SPRN_PMC6 | |
3496 | stw r3, VCPU_PMC(r9) | |
3497 | stw r4, VCPU_PMC + 4(r9) | |
3498 | stw r5, VCPU_PMC + 8(r9) | |
3499 | stw r6, VCPU_PMC + 12(r9) | |
3500 | stw r7, VCPU_PMC + 16(r9) | |
3501 | stw r8, VCPU_PMC + 20(r9) | |
3502 | BEGIN_FTR_SECTION | |
3503 | mfspr r5, SPRN_SIER | |
3504 | std r5, VCPU_SIER(r9) | |
3505 | BEGIN_FTR_SECTION_NESTED(96) | |
3506 | mfspr r6, SPRN_SPMC1 | |
3507 | mfspr r7, SPRN_SPMC2 | |
3508 | mfspr r8, SPRN_MMCRS | |
3509 | stw r6, VCPU_PMC + 24(r9) | |
3510 | stw r7, VCPU_PMC + 28(r9) | |
3511 | std r8, VCPU_MMCR + 32(r9) | |
3512 | lis r4, 0x8000 | |
3513 | mtspr SPRN_MMCRS, r4 | |
3514 | END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) | |
3515 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
3516 | 22: blr | |
3517 | ||
9bc01a9b PM |
3518 | /* |
3519 | * This works around a hardware bug on POWER8E processors, where | |
3520 | * writing a 1 to the MMCR0[PMAO] bit doesn't generate a | |
3521 | * performance monitor interrupt. Instead, when we need to have | |
3522 | * an interrupt pending, we have to arrange for a counter to overflow. | |
3523 | */ | |
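/*
 * Sketch of the mechanism: clear MMCR2 so no freeze conditions
 * interfere, enable alerts (MMCR0_PMXE) and freeze-on-condition
 * (MMCR0_FCECE), keep PMC5/6 counting run cycles (MMCR0_C56RUN),
 * then load PMC6 with 0x7fffffff so its next increment makes it
 * negative (0x80000000), which raises the pending interrupt we need.
 */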
3524 | kvmppc_fix_pmao: | |
3525 | li r3, 0 | |
3526 | mtspr SPRN_MMCR2, r3 | |
3527 | lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h | |
3528 | ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN | |
3529 | mtspr SPRN_MMCR0, r3 | |
3530 | lis r3, 0x7fff | |
3531 | ori r3, r3, 0xffff | |
3532 | mtspr SPRN_PMC6, r3 | |
3533 | isync | |
3534 | blr | |
b6c295df PM |
3535 | |
3536 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING | |
3537 | /* | |
3538 | * Start timing an activity | |
3539 | * r3 = pointer to time accumulation struct, r4 = vcpu | |
3540 | */ | |
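/* Timestamps are kept in host timebase: VCORE_TB_OFFSET_APPL holds
 * the timebase offset currently applied, so subtracting it converts
 * the guest-visible timebase back to host time. */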
3541 | kvmhv_start_timing: | |
3542 | ld r5, HSTATE_KVM_VCORE(r13) | |
57b8daa7 PM |
3543 | ld r6, VCORE_TB_OFFSET_APPL(r5) |
3544 | mftb r5 | |
3545 | subf r5, r6, r5 /* subtract current timebase offset */ | |
b6c295df PM |
3546 | std r3, VCPU_CUR_ACTIVITY(r4) |
3547 | std r5, VCPU_ACTIVITY_START(r4) | |
3548 | blr | |
3549 | ||
3550 | /* | |
3551 | * Accumulate time to one activity and start another. | |
3552 | * r3 = pointer to new time accumulation struct, r4 = vcpu | |
3553 | */ | |
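/*
 * Rough C sketch of the seqlock-style update below (field names taken
 * from the TAS_* offsets; illustrative only):
 *
 *	first = (acc->seqcount == 0);
 *	acc->seqcount++;		// odd: tell readers we're writing
 *	smp_wmb();			// the first lwsync
 *	acc->total += delta;
 *	if (first || delta < acc->min)
 *		acc->min = delta;
 *	if (delta > acc->max)
 *		acc->max = delta;
 *	smp_wmb();			// the second lwsync
 *	acc->seqcount++;		// even again: update complete
 */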
3554 | kvmhv_accumulate_time: | |
3555 | ld r5, HSTATE_KVM_VCORE(r13) | |
57b8daa7 PM |
3556 | ld r8, VCORE_TB_OFFSET_APPL(r5) |
3557 | ld r5, VCPU_CUR_ACTIVITY(r4) | |
b6c295df PM |
3558 | ld r6, VCPU_ACTIVITY_START(r4) |
3559 | std r3, VCPU_CUR_ACTIVITY(r4) | |
3560 | mftb r7 | |
57b8daa7 | 3561 | subf r7, r8, r7 /* subtract current timebase offset */ |
b6c295df PM |
3562 | std r7, VCPU_ACTIVITY_START(r4) |
3563 | cmpdi r5, 0 | |
3564 | beqlr | |
3565 | subf r3, r6, r7 | |
3566 | ld r8, TAS_SEQCOUNT(r5) | |
3567 | cmpdi r8, 0 | |
3568 | addi r8, r8, 1 | |
3569 | std r8, TAS_SEQCOUNT(r5) | |
3570 | lwsync | |
3571 | ld r7, TAS_TOTAL(r5) | |
3572 | add r7, r7, r3 | |
3573 | std r7, TAS_TOTAL(r5) | |
3574 | ld r6, TAS_MIN(r5) | |
3575 | ld r7, TAS_MAX(r5) | |
3576 | beq 3f | |
3577 | cmpd r3, r6 | |
3578 | bge 1f | |
3579 | 3: std r3, TAS_MIN(r5) | |
3580 | 1: cmpd r3, r7 | |
3581 | ble 2f | |
3582 | std r3, TAS_MAX(r5) | |
3583 | 2: lwsync | |
3584 | addi r8, r8, 1 | |
3585 | std r8, TAS_SEQCOUNT(r5) | |
3586 | blr | |
3587 | #endif |