/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
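
	/*
	 * Note: SRR0 was loaded above with the address of
	 * kvmppc_call_hv_entry and SRR1 with MSR & ~(IR|DR), so the
	 * RFI switches relocation off and "returns" into that code
	 * in real mode.
	 */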
61 | ||
218309b7 | 62 | kvmppc_call_hv_entry: |
e0b7ec05 | 63 | ld r4, HSTATE_KVM_VCPU(r13) |
218309b7 PM |
64 | bl kvmppc_hv_entry |
65 | ||
66 | /* Back from guest - restore host state and return to caller */ | |
67 | ||
eee7ff9d | 68 | BEGIN_FTR_SECTION |
218309b7 PM |
69 | /* Restore host DABR and DABRX */ |
70 | ld r5,HSTATE_DABR(r13) | |
71 | li r6,7 | |
72 | mtspr SPRN_DABR,r5 | |
73 | mtspr SPRN_DABRX,r6 | |
eee7ff9d | 74 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
218309b7 PM |
75 | |
76 | /* Restore SPRG3 */ | |
9d378dfa SW |
77 | ld r3,PACA_SPRG_VDSO(r13) |
78 | mtspr SPRN_SPRG_VDSO_WRITE,r3 | |
218309b7 | 79 | |
218309b7 PM |
80 | /* Reload the host's PMU registers */ |
81 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | |
82 | lbz r4, LPPACA_PMCINUSE(r3) | |
83 | cmpwi r4, 0 | |
84 | beq 23f /* skip if not */ | |
9bc01a9b PM |
85 | BEGIN_FTR_SECTION |
86 | ld r3, HSTATE_MMCR(r13) | |
87 | andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO | |
88 | cmpwi r4, MMCR0_PMAO | |
89 | beql kvmppc_fix_pmao | |
90 | END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) | |
218309b7 PM |
91 | lwz r3, HSTATE_PMC(r13) |
92 | lwz r4, HSTATE_PMC + 4(r13) | |
93 | lwz r5, HSTATE_PMC + 8(r13) | |
94 | lwz r6, HSTATE_PMC + 12(r13) | |
95 | lwz r8, HSTATE_PMC + 16(r13) | |
96 | lwz r9, HSTATE_PMC + 20(r13) | |
218309b7 PM |
97 | mtspr SPRN_PMC1, r3 |
98 | mtspr SPRN_PMC2, r4 | |
99 | mtspr SPRN_PMC3, r5 | |
100 | mtspr SPRN_PMC4, r6 | |
101 | mtspr SPRN_PMC5, r8 | |
102 | mtspr SPRN_PMC6, r9 | |
218309b7 PM |
103 | ld r3, HSTATE_MMCR(r13) |
104 | ld r4, HSTATE_MMCR + 8(r13) | |
105 | ld r5, HSTATE_MMCR + 16(r13) | |
72cde5a8 PM |
106 | ld r6, HSTATE_MMCR + 24(r13) |
107 | ld r7, HSTATE_MMCR + 32(r13) | |
218309b7 PM |
108 | mtspr SPRN_MMCR1, r4 |
109 | mtspr SPRN_MMCRA, r5 | |
72cde5a8 PM |
110 | mtspr SPRN_SIAR, r6 |
111 | mtspr SPRN_SDAR, r7 | |
112 | BEGIN_FTR_SECTION | |
113 | ld r8, HSTATE_MMCR + 40(r13) | |
114 | ld r9, HSTATE_MMCR + 48(r13) | |
115 | mtspr SPRN_MMCR2, r8 | |
116 | mtspr SPRN_SIER, r9 | |
117 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
218309b7 PM |
118 | mtspr SPRN_MMCR0, r3 |
119 | isync | |
120 | 23: | |
121 | ||
e0b7ec05 PM |
122 | /* |
123 | * Reload DEC. HDEC interrupts were disabled when | |
124 | * we reloaded the host's LPCR value. | |
125 | */ | |
126 | ld r3, HSTATE_DECEXP(r13) | |
127 | mftb r4 | |
128 | subf r4, r4, r3 | |
129 | mtspr SPRN_DEC, r4 | |
130 | ||
218309b7 PM |
131 | /* |
132 | * For external and machine check interrupts, we need | |
133 | * to call the Linux handler to process the interrupt. | |
134 | * We do that by jumping to absolute address 0x500 for | |
135 | * external interrupts, or the machine_check_fwnmi label | |
136 | * for machine checks (since firmware might have patched | |
137 | * the vector area at 0x200). The [h]rfid at the end of the | |
138 | * handler will return to the book3s_hv_interrupts.S code. | |
139 | * For other interrupts we do the rfid to get back | |
140 | * to the book3s_hv_interrupts.S code here. | |
141 | */ | |
142 | ld r8, 112+PPC_LR_STKOFF(r1) | |
143 | addi r1, r1, 112 | |
144 | ld r7, HSTATE_HOST_MSR(r13) | |
145 | ||
146 | cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK | |
147 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | |
218309b7 | 148 | beq 11f |
0869b6fd MS |
149 | cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI |
150 | beq cr2, 14f /* HMI check */ | |
218309b7 PM |
151 | |
152 | /* RFI into the highmem handler, or branch to interrupt handler */ | |
153 | mfmsr r6 | |
154 | li r0, MSR_RI | |
155 | andc r6, r6, r0 | |
156 | mtmsrd r6, 1 /* Clear RI in MSR */ | |
157 | mtsrr0 r8 | |
158 | mtsrr1 r7 | |
218309b7 PM |
159 | beq cr1, 13f /* machine check */ |
160 | RFI | |
161 | ||
162 | /* On POWER7, we have external interrupts set to use HSRR0/1 */ | |
163 | 11: mtspr SPRN_HSRR0, r8 | |
164 | mtspr SPRN_HSRR1, r7 | |
165 | ba 0x500 | |
166 | ||
167 | 13: b machine_check_fwnmi | |
168 | ||
0869b6fd MS |
169 | 14: mtspr SPRN_HSRR0, r8 |
170 | mtspr SPRN_HSRR1, r7 | |
171 | b hmi_exception_after_realmode | |
172 | ||
e0b7ec05 PM |
173 | kvmppc_primary_no_guest: |
174 | /* We handle this much like a ceded vcpu */ | |
175 | /* set our bit in napping_threads */ | |
176 | ld r5, HSTATE_KVM_VCORE(r13) | |
177 | lbz r7, HSTATE_PTID(r13) | |
178 | li r0, 1 | |
179 | sld r0, r0, r7 | |
180 | addi r6, r5, VCORE_NAPPING_THREADS | |
181 | 1: lwarx r3, 0, r6 | |
182 | or r3, r3, r0 | |
183 | stwcx. r3, 0, r6 | |
184 | bne 1b | |
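
	/*
	 * The lwarx/stwcx. loop above atomically ORs this thread's bit
	 * into vcore->napping_threads, retrying if another thread
	 * updated the word concurrently.
	 */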
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, 1
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	bne	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r1, SPRN_CTRLF
	ori	r1, r1, 1
	mtspr	SPRN_CTRLT, r1

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
	/* Clear the runlatch bit before napping */
	mfspr	r2, SPRN_CTRLF
	clrrdi	r2, r2, 1
	mtspr	SPRN_CTRLT, r2

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
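
	/*
	 * The store/ptesync/load/compare sequence above is intended to
	 * make sure all our memory accesses (including the update of
	 * HSTATE_SCRATCH0) have completed before the nap instruction;
	 * the bne is never taken since r0 is compared with itself.
	 */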
331 | ||
332 | /****************************************************************************** | |
333 | * * | |
334 | * Entry code * | |
335 | * * | |
336 | *****************************************************************************/ | |
337 | ||
de56a948 PM |
338 | .global kvmppc_hv_entry |
339 | kvmppc_hv_entry: | |
340 | ||
341 | /* Required state: | |
342 | * | |
e0b7ec05 | 343 | * R4 = vcpu pointer (or NULL) |
de56a948 PM |
344 | * MSR = ~IR|DR |
345 | * R13 = PACA | |
346 | * R1 = host R1 | |
06a29e42 | 347 | * R2 = TOC |
de56a948 PM |
348 | * all other volatile GPRS = free |
349 | */ | |
350 | mflr r0 | |
218309b7 PM |
351 | std r0, PPC_LR_STKOFF(r1) |
352 | stdu r1, -112(r1) | |
de56a948 | 353 | |
de56a948 PM |
354 | /* Save R1 in the PACA */ |
355 | std r1, HSTATE_HOST_R1(r13) | |
356 | ||
44a3add8 PM |
357 | li r6, KVM_GUEST_MODE_HOST_HV |
358 | stb r6, HSTATE_IN_GUEST(r13) | |
359 | ||
de56a948 PM |
360 | /* Clear out SLB */ |
361 | li r6,0 | |
362 | slbmte r6,r6 | |
363 | slbia | |
364 | ptesync | |
365 | ||
9e368f29 | 366 | /* |
c17b98cf | 367 | * POWER7/POWER8 host -> guest partition switch code. |
9e368f29 PM |
368 | * We don't have to lock against concurrent tlbies, |
369 | * but we do have to coordinate across hardware threads. | |
370 | */ | |
371fefd6 PM |
371 | /* Increment entry count iff exit count is zero. */ |
372 | ld r5,HSTATE_KVM_VCORE(r13) | |
373 | addi r9,r5,VCORE_ENTRY_EXIT | |
374 | 21: lwarx r3,0,r9 | |
375 | cmpwi r3,0x100 /* any threads starting to exit? */ | |
376 | bge secondary_too_late /* if so we're too late to the party */ | |
377 | addi r3,r3,1 | |
378 | stwcx. r3,0,r9 | |
379 | bne 21b | |
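
	/*
	 * entry_exit_count packs the entry count in bits 0-7 and the
	 * exit count in bits 8-15, so a value >= 0x100 means some
	 * thread has already started exiting and we must not enter.
	 */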
380 | ||
381 | /* Primary thread switches to guest partition. */ | |
e0b7ec05 PM |
382 | ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ |
383 | lbz r6,HSTATE_PTID(r13) | |
371fefd6 PM |
384 | cmpwi r6,0 |
385 | bne 20f | |
de56a948 PM |
386 | ld r6,KVM_SDR1(r9) |
387 | lwz r7,KVM_LPID(r9) | |
388 | li r0,LPID_RSVD /* switch to reserved LPID */ | |
389 | mtspr SPRN_LPID,r0 | |
390 | ptesync | |
391 | mtspr SPRN_SDR1,r6 /* switch to partition page table */ | |
392 | mtspr SPRN_LPID,r7 | |
393 | isync | |
1b400ba0 PM |
394 | |
395 | /* See if we need to flush the TLB */ | |
396 | lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ | |
397 | clrldi r7,r6,64-6 /* extract bit number (6 bits) */ | |
398 | srdi r6,r6,6 /* doubleword number */ | |
399 | sldi r6,r6,3 /* address offset */ | |
400 | add r6,r6,r9 | |
401 | addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ | |
371fefd6 | 402 | li r0,1 |
1b400ba0 PM |
403 | sld r0,r0,r7 |
404 | ld r7,0(r6) | |
405 | and. r7,r7,r0 | |
406 | beq 22f | |
407 | 23: ldarx r7,0,r6 /* if set, clear the bit */ | |
408 | andc r7,r7,r0 | |
409 | stdcx. r7,0,r6 | |
410 | bne 23b | |
ca252055 PM |
411 | /* Flush the TLB of any entries for this LPID */ |
412 | /* use arch 2.07S as a proxy for POWER8 */ | |
413 | BEGIN_FTR_SECTION | |
414 | li r6,512 /* POWER8 has 512 sets */ | |
415 | FTR_SECTION_ELSE | |
416 | li r6,128 /* POWER7 has 128 sets */ | |
417 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) | |
1b400ba0 PM |
418 | mtctr r6 |
419 | li r7,0x800 /* IS field = 0b10 */ | |
420 | ptesync | |
421 | 28: tlbiel r7 | |
422 | addi r7,r7,0x1000 | |
423 | bdnz 28b | |
424 | ptesync | |
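
	/*
	 * Each tlbiel above (IS field = 0b10) invalidates one
	 * congruence class of TLB entries; stepping the EA by 0x1000
	 * per iteration walks every set (128 on POWER7, 512 on POWER8).
	 */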
425 | ||
93b0f4dc PM |
426 | /* Add timebase offset onto timebase */ |
427 | 22: ld r8,VCORE_TB_OFFSET(r5) | |
428 | cmpdi r8,0 | |
429 | beq 37f | |
430 | mftb r6 /* current host timebase */ | |
431 | add r8,r8,r6 | |
432 | mtspr SPRN_TBU40,r8 /* update upper 40 bits */ | |
433 | mftb r7 /* check if lower 24 bits overflowed */ | |
434 | clrldi r6,r6,40 | |
435 | clrldi r7,r7,40 | |
436 | cmpld r7,r6 | |
437 | bge 37f | |
438 | addis r8,r8,0x100 /* if so, increment upper 40 bits */ | |
439 | mtspr SPRN_TBU40,r8 | |
440 | ||
388cc6e1 PM |
441 | /* Load guest PCR value to select appropriate compat mode */ |
442 | 37: ld r7, VCORE_PCR(r5) | |
443 | cmpdi r7, 0 | |
444 | beq 38f | |
445 | mtspr SPRN_PCR, r7 | |
446 | 38: | |
b005255e MN |
447 | |
448 | BEGIN_FTR_SECTION | |
449 | /* DPDES is shared between threads */ | |
450 | ld r8, VCORE_DPDES(r5) | |
451 | mtspr SPRN_DPDES, r8 | |
452 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
453 | ||
388cc6e1 | 454 | li r0,1 |
371fefd6 PM |
455 | stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ |
456 | b 10f | |
457 | ||
458 | /* Secondary threads wait for primary to have done partition switch */ | |
459 | 20: lbz r0,VCORE_IN_GUEST(r5) | |
460 | cmpwi r0,0 | |
461 | beq 20b | |
aa04b4cc | 462 | |
19ccb76a | 463 | /* Set LPCR and RMOR. */ |
a0144e2a | 464 | 10: ld r8,VCORE_LPCR(r5) |
19ccb76a | 465 | mtspr SPRN_LPCR,r8 |
aa04b4cc PM |
466 | ld r8,KVM_RMOR(r9) |
467 | mtspr SPRN_RMOR,r8 | |
de56a948 PM |
468 | isync |
469 | ||
470 | /* Check if HDEC expires soon */ | |
471 | mfspr r3,SPRN_HDEC | |
e0b7ec05 | 472 | cmpwi r3,512 /* 1 microsecond */ |
de56a948 | 473 | li r12,BOOK3S_INTERRUPT_HV_DECREMENTER |
de56a948 | 474 | blt hdec_soon |
9e368f29 | 475 | |
e0b7ec05 PM |
476 | /* Do we have a guest vcpu to run? */ |
477 | cmpdi r4, 0 | |
478 | beq kvmppc_primary_no_guest | |
479 | kvmppc_got_guest: | |
de56a948 PM |
480 | |
481 | /* Load up guest SLB entries */ | |
e0b7ec05 | 482 | lwz r5,VCPU_SLB_MAX(r4) |
de56a948 PM |
483 | cmpwi r5,0 |
484 | beq 9f | |
485 | mtctr r5 | |
486 | addi r6,r4,VCPU_SLB | |
487 | 1: ld r8,VCPU_SLB_E(r6) | |
488 | ld r9,VCPU_SLB_V(r6) | |
489 | slbmte r9,r8 | |
490 | addi r6,r6,VCPU_SLB_SIZE | |
491 | bdnz 1b | |
492 | 9: | |
e0b7ec05 PM |
493 | /* Increment yield count if they have a VPA */ |
494 | ld r3, VCPU_VPA(r4) | |
495 | cmpdi r3, 0 | |
496 | beq 25f | |
0865a583 AG |
497 | li r6, LPPACA_YIELDCOUNT |
498 | LWZX_BE r5, r3, r6 | |
e0b7ec05 | 499 | addi r5, r5, 1 |
0865a583 | 500 | STWX_BE r5, r3, r6 |
e0b7ec05 PM |
501 | li r6, 1 |
502 | stb r6, VCPU_VPA_DIRTY(r4) | |
503 | 25: | |
504 | ||
e0b7ec05 PM |
505 | /* Save purr/spurr */ |
506 | mfspr r5,SPRN_PURR | |
507 | mfspr r6,SPRN_SPURR | |
508 | std r5,HSTATE_PURR(r13) | |
509 | std r6,HSTATE_SPURR(r13) | |
510 | ld r7,VCPU_PURR(r4) | |
511 | ld r8,VCPU_SPURR(r4) | |
512 | mtspr SPRN_PURR,r7 | |
513 | mtspr SPRN_SPURR,r8 | |
e0b7ec05 PM |
514 | |
515 | BEGIN_FTR_SECTION | |
516 | /* Set partition DABR */ | |
517 | /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ | |
8563bf52 | 518 | lwz r5,VCPU_DABRX(r4) |
e0b7ec05 PM |
519 | ld r6,VCPU_DABR(r4) |
520 | mtspr SPRN_DABRX,r5 | |
521 | mtspr SPRN_DABR,r6 | |
e0b7ec05 | 522 | isync |
e0b7ec05 PM |
523 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
524 | ||
e4e38121 MN |
525 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
526 | BEGIN_FTR_SECTION | |
527 | b skip_tm | |
528 | END_FTR_SECTION_IFCLR(CPU_FTR_TM) | |
529 | ||
530 | /* Turn on TM/FP/VSX/VMX so we can restore them. */ | |
531 | mfmsr r5 | |
532 | li r6, MSR_TM >> 32 | |
533 | sldi r6, r6, 32 | |
534 | or r5, r5, r6 | |
535 | ori r5, r5, MSR_FP | |
536 | oris r5, r5, (MSR_VEC | MSR_VSX)@h | |
537 | mtmsrd r5 | |
538 | ||
539 | /* | |
540 | * The user may change these outside of a transaction, so they must | |
541 | * always be context switched. | |
542 | */ | |
543 | ld r5, VCPU_TFHAR(r4) | |
544 | ld r6, VCPU_TFIAR(r4) | |
545 | ld r7, VCPU_TEXASR(r4) | |
546 | mtspr SPRN_TFHAR, r5 | |
547 | mtspr SPRN_TFIAR, r6 | |
548 | mtspr SPRN_TEXASR, r7 | |
549 | ||
550 | ld r5, VCPU_MSR(r4) | |
551 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | |
552 | beq skip_tm /* TM not active in guest */ | |
553 | ||
	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
561 | ||
562 | /* | |
563 | * We need to load up the checkpointed state for the guest. | |
564 | * We need to do this early as it will blow away any GPRs, VSRs and | |
565 | * some SPRs. | |
566 | */ | |
567 | ||
568 | mr r31, r4 | |
569 | addi r3, r31, VCPU_FPRS_TM | |
9bf163f8 | 570 | bl load_fp_state |
e4e38121 | 571 | addi r3, r31, VCPU_VRS_TM |
9bf163f8 | 572 | bl load_vr_state |
e4e38121 MN |
573 | mr r4, r31 |
574 | lwz r7, VCPU_VRSAVE_TM(r4) | |
575 | mtspr SPRN_VRSAVE, r7 | |
576 | ||
577 | ld r5, VCPU_LR_TM(r4) | |
578 | lwz r6, VCPU_CR_TM(r4) | |
579 | ld r7, VCPU_CTR_TM(r4) | |
580 | ld r8, VCPU_AMR_TM(r4) | |
581 | ld r9, VCPU_TAR_TM(r4) | |
582 | mtlr r5 | |
583 | mtcr r6 | |
584 | mtctr r7 | |
585 | mtspr SPRN_AMR, r8 | |
586 | mtspr SPRN_TAR, r9 | |
587 | ||
588 | /* | |
589 | * Load up PPR and DSCR values but don't put them in the actual SPRs | |
590 | * till the last moment to avoid running with userspace PPR and DSCR for | |
591 | * too long. | |
592 | */ | |
593 | ld r29, VCPU_DSCR_TM(r4) | |
594 | ld r30, VCPU_PPR_TM(r4) | |
595 | ||
596 | std r2, PACATMSCRATCH(r13) /* Save TOC */ | |
597 | ||
598 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | |
599 | li r5, 0 | |
600 | mtmsrd r5, 1 | |
601 | ||
602 | /* Load GPRs r0-r28 */ | |
603 | reg = 0 | |
604 | .rept 29 | |
605 | ld reg, VCPU_GPRS_TM(reg)(r31) | |
606 | reg = reg + 1 | |
607 | .endr | |
608 | ||
609 | mtspr SPRN_DSCR, r29 | |
610 | mtspr SPRN_PPR, r30 | |
611 | ||
612 | /* Load final GPRs */ | |
613 | ld 29, VCPU_GPRS_TM(29)(r31) | |
614 | ld 30, VCPU_GPRS_TM(30)(r31) | |
615 | ld 31, VCPU_GPRS_TM(31)(r31) | |
616 | ||
617 | /* TM checkpointed state is now setup. All GPRs are now volatile. */ | |
618 | TRECHKPT | |
619 | ||
620 | /* Now let's get back the state we need. */ | |
621 | HMT_MEDIUM | |
622 | GET_PACA(r13) | |
623 | ld r29, HSTATE_DSCR(r13) | |
624 | mtspr SPRN_DSCR, r29 | |
625 | ld r4, HSTATE_KVM_VCPU(r13) | |
626 | ld r1, HSTATE_HOST_R1(r13) | |
627 | ld r2, PACATMSCRATCH(r13) | |
628 | ||
629 | /* Set the MSR RI since we have our registers back. */ | |
630 | li r5, MSR_RI | |
631 | mtmsrd r5, 1 | |
632 | skip_tm: | |
633 | #endif | |
634 | ||
e0b7ec05 PM |
635 | /* Load guest PMU registers */ |
636 | /* R4 is live here (vcpu pointer) */ | |
637 | li r3, 1 | |
638 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | |
639 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | |
640 | isync | |
9bc01a9b PM |
641 | BEGIN_FTR_SECTION |
642 | ld r3, VCPU_MMCR(r4) | |
643 | andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO | |
644 | cmpwi r5, MMCR0_PMAO | |
645 | beql kvmppc_fix_pmao | |
646 | END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) | |
e0b7ec05 PM |
647 | lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ |
648 | lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ | |
649 | lwz r6, VCPU_PMC + 8(r4) | |
650 | lwz r7, VCPU_PMC + 12(r4) | |
651 | lwz r8, VCPU_PMC + 16(r4) | |
652 | lwz r9, VCPU_PMC + 20(r4) | |
e0b7ec05 PM |
653 | mtspr SPRN_PMC1, r3 |
654 | mtspr SPRN_PMC2, r5 | |
655 | mtspr SPRN_PMC3, r6 | |
656 | mtspr SPRN_PMC4, r7 | |
657 | mtspr SPRN_PMC5, r8 | |
658 | mtspr SPRN_PMC6, r9 | |
e0b7ec05 PM |
659 | ld r3, VCPU_MMCR(r4) |
660 | ld r5, VCPU_MMCR + 8(r4) | |
661 | ld r6, VCPU_MMCR + 16(r4) | |
662 | ld r7, VCPU_SIAR(r4) | |
663 | ld r8, VCPU_SDAR(r4) | |
664 | mtspr SPRN_MMCR1, r5 | |
665 | mtspr SPRN_MMCRA, r6 | |
666 | mtspr SPRN_SIAR, r7 | |
667 | mtspr SPRN_SDAR, r8 | |
b005255e MN |
668 | BEGIN_FTR_SECTION |
669 | ld r5, VCPU_MMCR + 24(r4) | |
670 | ld r6, VCPU_SIER(r4) | |
671 | lwz r7, VCPU_PMC + 24(r4) | |
672 | lwz r8, VCPU_PMC + 28(r4) | |
673 | ld r9, VCPU_MMCR + 32(r4) | |
674 | mtspr SPRN_MMCR2, r5 | |
675 | mtspr SPRN_SIER, r6 | |
676 | mtspr SPRN_SPMC1, r7 | |
677 | mtspr SPRN_SPMC2, r8 | |
678 | mtspr SPRN_MMCRS, r9 | |
679 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
e0b7ec05 PM |
680 | mtspr SPRN_MMCR0, r3 |
681 | isync | |
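
	/*
	 * MMCR0 is deliberately written last: it was set to MMCR0_FC
	 * above, so the counters stay frozen until all the other guest
	 * PMU state is in place, and only then does the guest's own
	 * MMCR0 value take effect.
	 */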
682 | ||
683 | /* Load up FP, VMX and VSX registers */ | |
684 | bl kvmppc_load_fp | |
685 | ||
686 | ld r14, VCPU_GPR(R14)(r4) | |
687 | ld r15, VCPU_GPR(R15)(r4) | |
688 | ld r16, VCPU_GPR(R16)(r4) | |
689 | ld r17, VCPU_GPR(R17)(r4) | |
690 | ld r18, VCPU_GPR(R18)(r4) | |
691 | ld r19, VCPU_GPR(R19)(r4) | |
692 | ld r20, VCPU_GPR(R20)(r4) | |
693 | ld r21, VCPU_GPR(R21)(r4) | |
694 | ld r22, VCPU_GPR(R22)(r4) | |
695 | ld r23, VCPU_GPR(R23)(r4) | |
696 | ld r24, VCPU_GPR(R24)(r4) | |
697 | ld r25, VCPU_GPR(R25)(r4) | |
698 | ld r26, VCPU_GPR(R26)(r4) | |
699 | ld r27, VCPU_GPR(R27)(r4) | |
700 | ld r28, VCPU_GPR(R28)(r4) | |
701 | ld r29, VCPU_GPR(R29)(r4) | |
702 | ld r30, VCPU_GPR(R30)(r4) | |
703 | ld r31, VCPU_GPR(R31)(r4) | |
704 | ||
e0b7ec05 PM |
705 | /* Switch DSCR to guest value */ |
706 | ld r5, VCPU_DSCR(r4) | |
707 | mtspr SPRN_DSCR, r5 | |
e0b7ec05 | 708 | |
b005255e | 709 | BEGIN_FTR_SECTION |
c17b98cf | 710 | /* Skip next section on POWER7 */ |
b005255e MN |
711 | b 8f |
712 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | |
713 | /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ | |
714 | mfmsr r8 | |
715 | li r0, 1 | |
716 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | |
717 | mtmsrd r8 | |
718 | ||
719 | /* Load up POWER8-specific registers */ | |
720 | ld r5, VCPU_IAMR(r4) | |
721 | lwz r6, VCPU_PSPB(r4) | |
722 | ld r7, VCPU_FSCR(r4) | |
723 | mtspr SPRN_IAMR, r5 | |
724 | mtspr SPRN_PSPB, r6 | |
725 | mtspr SPRN_FSCR, r7 | |
726 | ld r5, VCPU_DAWR(r4) | |
727 | ld r6, VCPU_DAWRX(r4) | |
728 | ld r7, VCPU_CIABR(r4) | |
729 | ld r8, VCPU_TAR(r4) | |
730 | mtspr SPRN_DAWR, r5 | |
731 | mtspr SPRN_DAWRX, r6 | |
732 | mtspr SPRN_CIABR, r7 | |
733 | mtspr SPRN_TAR, r8 | |
734 | ld r5, VCPU_IC(r4) | |
735 | ld r6, VCPU_VTB(r4) | |
736 | mtspr SPRN_IC, r5 | |
737 | mtspr SPRN_VTB, r6 | |
7b490411 | 738 | ld r8, VCPU_EBBHR(r4) |
b005255e MN |
739 | mtspr SPRN_EBBHR, r8 |
740 | ld r5, VCPU_EBBRR(r4) | |
741 | ld r6, VCPU_BESCR(r4) | |
742 | ld r7, VCPU_CSIGR(r4) | |
743 | ld r8, VCPU_TACR(r4) | |
744 | mtspr SPRN_EBBRR, r5 | |
745 | mtspr SPRN_BESCR, r6 | |
746 | mtspr SPRN_CSIGR, r7 | |
747 | mtspr SPRN_TACR, r8 | |
748 | ld r5, VCPU_TCSCR(r4) | |
749 | ld r6, VCPU_ACOP(r4) | |
750 | lwz r7, VCPU_GUEST_PID(r4) | |
751 | ld r8, VCPU_WORT(r4) | |
752 | mtspr SPRN_TCSCR, r5 | |
753 | mtspr SPRN_ACOP, r6 | |
754 | mtspr SPRN_PID, r7 | |
755 | mtspr SPRN_WORT, r8 | |
756 | 8: | |
757 | ||
e0b7ec05 PM |
758 | /* |
759 | * Set the decrementer to the guest decrementer. | |
760 | */ | |
761 | ld r8,VCPU_DEC_EXPIRES(r4) | |
c5fb80d3 PM |
762 | /* r8 is a host timebase value here, convert to guest TB */ |
763 | ld r5,HSTATE_KVM_VCORE(r13) | |
764 | ld r6,VCORE_TB_OFFSET(r5) | |
765 | add r8,r8,r6 | |
e0b7ec05 PM |
766 | mftb r7 |
767 | subf r3,r7,r8 | |
768 | mtspr SPRN_DEC,r3 | |
769 | stw r3,VCPU_DEC(r4) | |
770 | ||
771 | ld r5, VCPU_SPRG0(r4) | |
772 | ld r6, VCPU_SPRG1(r4) | |
773 | ld r7, VCPU_SPRG2(r4) | |
774 | ld r8, VCPU_SPRG3(r4) | |
775 | mtspr SPRN_SPRG0, r5 | |
776 | mtspr SPRN_SPRG1, r6 | |
777 | mtspr SPRN_SPRG2, r7 | |
778 | mtspr SPRN_SPRG3, r8 | |
779 | ||
780 | /* Load up DAR and DSISR */ | |
781 | ld r5, VCPU_DAR(r4) | |
782 | lwz r6, VCPU_DSISR(r4) | |
783 | mtspr SPRN_DAR, r5 | |
784 | mtspr SPRN_DSISR, r6 | |
785 | ||
e0b7ec05 PM |
786 | /* Restore AMR and UAMOR, set AMOR to all 1s */ |
787 | ld r5,VCPU_AMR(r4) | |
788 | ld r6,VCPU_UAMOR(r4) | |
789 | li r7,-1 | |
790 | mtspr SPRN_AMR,r5 | |
791 | mtspr SPRN_UAMOR,r6 | |
792 | mtspr SPRN_AMOR,r7 | |
de56a948 PM |
793 | |
794 | /* Restore state of CTRL run bit; assume 1 on entry */ | |
795 | lwz r5,VCPU_CTRL(r4) | |
796 | andi. r5,r5,1 | |
797 | bne 4f | |
798 | mfspr r6,SPRN_CTRLF | |
799 | clrrdi r6,r6,1 | |
800 | mtspr SPRN_CTRLT,r6 | |
801 | 4: | |
802 | ld r6, VCPU_CTR(r4) | |
803 | lwz r7, VCPU_XER(r4) | |
804 | ||
805 | mtctr r6 | |
806 | mtxer r7 | |
807 | ||
e3bbbbfa | 808 | kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ |
4619ac88 PM |
809 | ld r10, VCPU_PC(r4) |
810 | ld r11, VCPU_MSR(r4) | |
de56a948 PM |
811 | ld r6, VCPU_SRR0(r4) |
812 | ld r7, VCPU_SRR1(r4) | |
e3bbbbfa PM |
813 | mtspr SPRN_SRR0, r6 |
814 | mtspr SPRN_SRR1, r7 | |
de56a948 | 815 | |
e3bbbbfa | 816 | deliver_guest_interrupt: |
4619ac88 | 817 | /* r11 = vcpu->arch.msr & ~MSR_HV */ |
de56a948 PM |
818 | rldicl r11, r11, 63 - MSR_HV_LG, 1 |
819 | rotldi r11, r11, 1 + MSR_HV_LG | |
820 | ori r11, r11, MSR_ME | |
821 | ||
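	/*
	 * LPCR[MER] (mediated external exception request) makes the
	 * hardware deliver an external interrupt to the guest as soon
	 * as the guest sets MSR[EE], which covers the case of a pending
	 * level-triggered interrupt while the guest has EE clear; the
	 * code below sets MER from the pending-exception bit and
	 * synthesizes the interrupt directly if EE is already set.
	 */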
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

	/*
	 * Required state:
	 * R4 = vcpu
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 * R13 = PACA
	 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
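
	/*
	 * The hrfid loads the guest PC from HSRR0 and MSR from HSRR1;
	 * the "b ." is not expected to execute and only guards against
	 * a fall-through.
	 */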
900 | ||
901 | /****************************************************************************** | |
902 | * * | |
903 | * Exit code * | |
904 | * * | |
905 | *****************************************************************************/ | |
906 | ||
907 | /* | |
908 | * We come here from the first-level interrupt handlers. | |
909 | */ | |
dd96b2c2 AK |
910 | .globl kvmppc_interrupt_hv |
911 | kvmppc_interrupt_hv: | |
de56a948 PM |
912 | /* |
913 | * Register contents: | |
914 | * R12 = interrupt vector | |
915 | * R13 = PACA | |
916 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | |
917 | * guest R13 saved in SPRN_SCRATCH0 | |
918 | */ | |
36e7bb38 | 919 | std r9, HSTATE_SCRATCH2(r13) |
44a3add8 PM |
920 | |
921 | lbz r9, HSTATE_IN_GUEST(r13) | |
922 | cmpwi r9, KVM_GUEST_MODE_HOST_HV | |
923 | beq kvmppc_bad_host_intr | |
dd96b2c2 AK |
924 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
925 | cmpwi r9, KVM_GUEST_MODE_GUEST | |
36e7bb38 | 926 | ld r9, HSTATE_SCRATCH2(r13) |
dd96b2c2 AK |
927 | beq kvmppc_interrupt_pr |
928 | #endif | |
44a3add8 PM |
929 | /* We're now back in the host but in guest MMU context */ |
930 | li r9, KVM_GUEST_MODE_HOST_HV | |
931 | stb r9, HSTATE_IN_GUEST(r13) | |
932 | ||
de56a948 PM |
933 | ld r9, HSTATE_KVM_VCPU(r13) |
934 | ||
935 | /* Save registers */ | |
936 | ||
c75df6f9 MN |
937 | std r0, VCPU_GPR(R0)(r9) |
938 | std r1, VCPU_GPR(R1)(r9) | |
939 | std r2, VCPU_GPR(R2)(r9) | |
940 | std r3, VCPU_GPR(R3)(r9) | |
941 | std r4, VCPU_GPR(R4)(r9) | |
942 | std r5, VCPU_GPR(R5)(r9) | |
943 | std r6, VCPU_GPR(R6)(r9) | |
944 | std r7, VCPU_GPR(R7)(r9) | |
945 | std r8, VCPU_GPR(R8)(r9) | |
36e7bb38 | 946 | ld r0, HSTATE_SCRATCH2(r13) |
c75df6f9 MN |
947 | std r0, VCPU_GPR(R9)(r9) |
948 | std r10, VCPU_GPR(R10)(r9) | |
949 | std r11, VCPU_GPR(R11)(r9) | |
de56a948 PM |
950 | ld r3, HSTATE_SCRATCH0(r13) |
951 | lwz r4, HSTATE_SCRATCH1(r13) | |
c75df6f9 | 952 | std r3, VCPU_GPR(R12)(r9) |
de56a948 | 953 | stw r4, VCPU_CR(r9) |
0acb9111 PM |
954 | BEGIN_FTR_SECTION |
955 | ld r3, HSTATE_CFAR(r13) | |
956 | std r3, VCPU_CFAR(r9) | |
957 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |
4b8473c9 PM |
958 | BEGIN_FTR_SECTION |
959 | ld r4, HSTATE_PPR(r13) | |
960 | std r4, VCPU_PPR(r9) | |
961 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |
de56a948 PM |
962 | |
963 | /* Restore R1/R2 so we can handle faults */ | |
964 | ld r1, HSTATE_HOST_R1(r13) | |
965 | ld r2, PACATOC(r13) | |
966 | ||
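	/*
	 * The first-level handlers pass the vector number in r12 with
	 * bit 1 set for interrupts delivered via HSRR0/1 (e.g. 0xe02
	 * for an HDSI), so test that bit to decide which register pair
	 * holds the guest PC/MSR, then clear it to recover the vector.
	 */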
	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	mr	r4, r9
	b	deliver_guest_interrupt

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
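
	/*
	 * In the loop above, slbmfee/slbmfev read each SLB entry by
	 * index; only valid entries (SLB_ESID_V) are saved, with the
	 * index folded into the ESID word so that slbmte can recreate
	 * the entry on the next guest entry.
	 */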
1083 | ||
1084 | /* | |
1085 | * Save the guest PURR/SPURR | |
1086 | */ | |
1087 | mfspr r5,SPRN_PURR | |
1088 | mfspr r6,SPRN_SPURR | |
1089 | ld r7,VCPU_PURR(r9) | |
1090 | ld r8,VCPU_SPURR(r9) | |
1091 | std r5,VCPU_PURR(r9) | |
1092 | std r6,VCPU_SPURR(r9) | |
1093 | subf r5,r7,r5 | |
1094 | subf r6,r8,r6 | |
1095 | ||
1096 | /* | |
1097 | * Restore host PURR/SPURR and add guest times | |
1098 | * so that the time in the guest gets accounted. | |
1099 | */ | |
1100 | ld r3,HSTATE_PURR(r13) | |
1101 | ld r4,HSTATE_SPURR(r13) | |
1102 | add r3,r3,r5 | |
1103 | add r4,r4,r6 | |
1104 | mtspr SPRN_PURR,r3 | |
1105 | mtspr SPRN_SPURR,r4 | |
1106 | ||
e0b7ec05 PM |
1107 | /* Save DEC */ |
1108 | mfspr r5,SPRN_DEC | |
1109 | mftb r6 | |
1110 | extsw r5,r5 | |
1111 | add r5,r5,r6 | |
c5fb80d3 PM |
1112 | /* r5 is a guest timebase value here, convert to host TB */ |
1113 | ld r3,HSTATE_KVM_VCORE(r13) | |
1114 | ld r4,VCORE_TB_OFFSET(r3) | |
1115 | subf r5,r4,r5 | |
e0b7ec05 PM |
1116 | std r5,VCPU_DEC_EXPIRES(r9) |
1117 | ||
b005255e MN |
1118 | BEGIN_FTR_SECTION |
1119 | b 8f | |
1120 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | |
b005255e MN |
1121 | /* Save POWER8-specific registers */ |
1122 | mfspr r5, SPRN_IAMR | |
1123 | mfspr r6, SPRN_PSPB | |
1124 | mfspr r7, SPRN_FSCR | |
1125 | std r5, VCPU_IAMR(r9) | |
1126 | stw r6, VCPU_PSPB(r9) | |
1127 | std r7, VCPU_FSCR(r9) | |
1128 | mfspr r5, SPRN_IC | |
1129 | mfspr r6, SPRN_VTB | |
1130 | mfspr r7, SPRN_TAR | |
1131 | std r5, VCPU_IC(r9) | |
1132 | std r6, VCPU_VTB(r9) | |
1133 | std r7, VCPU_TAR(r9) | |
7b490411 | 1134 | mfspr r8, SPRN_EBBHR |
b005255e MN |
1135 | std r8, VCPU_EBBHR(r9) |
1136 | mfspr r5, SPRN_EBBRR | |
1137 | mfspr r6, SPRN_BESCR | |
1138 | mfspr r7, SPRN_CSIGR | |
1139 | mfspr r8, SPRN_TACR | |
1140 | std r5, VCPU_EBBRR(r9) | |
1141 | std r6, VCPU_BESCR(r9) | |
1142 | std r7, VCPU_CSIGR(r9) | |
1143 | std r8, VCPU_TACR(r9) | |
1144 | mfspr r5, SPRN_TCSCR | |
1145 | mfspr r6, SPRN_ACOP | |
1146 | mfspr r7, SPRN_PID | |
1147 | mfspr r8, SPRN_WORT | |
1148 | std r5, VCPU_TCSCR(r9) | |
1149 | std r6, VCPU_ACOP(r9) | |
1150 | stw r7, VCPU_GUEST_PID(r9) | |
1151 | std r8, VCPU_WORT(r9) | |
1152 | 8: | |
1153 | ||
e0b7ec05 | 1154 | /* Save and reset AMR and UAMOR before turning on the MMU */ |
e0b7ec05 PM |
1155 | mfspr r5,SPRN_AMR |
1156 | mfspr r6,SPRN_UAMOR | |
1157 | std r5,VCPU_AMR(r9) | |
1158 | std r6,VCPU_UAMOR(r9) | |
1159 | li r6,0 | |
1160 | mtspr SPRN_AMR,r6 | |
e0b7ec05 PM |
1161 | |
1162 | /* Switch DSCR back to host value */ | |
e0b7ec05 PM |
1163 | mfspr r8, SPRN_DSCR |
1164 | ld r7, HSTATE_DSCR(r13) | |
1165 | std r8, VCPU_DSCR(r9) | |
1166 | mtspr SPRN_DSCR, r7 | |
e0b7ec05 PM |
1167 | |
1168 | /* Save non-volatile GPRs */ | |
1169 | std r14, VCPU_GPR(R14)(r9) | |
1170 | std r15, VCPU_GPR(R15)(r9) | |
1171 | std r16, VCPU_GPR(R16)(r9) | |
1172 | std r17, VCPU_GPR(R17)(r9) | |
1173 | std r18, VCPU_GPR(R18)(r9) | |
1174 | std r19, VCPU_GPR(R19)(r9) | |
1175 | std r20, VCPU_GPR(R20)(r9) | |
1176 | std r21, VCPU_GPR(R21)(r9) | |
1177 | std r22, VCPU_GPR(R22)(r9) | |
1178 | std r23, VCPU_GPR(R23)(r9) | |
1179 | std r24, VCPU_GPR(R24)(r9) | |
1180 | std r25, VCPU_GPR(R25)(r9) | |
1181 | std r26, VCPU_GPR(R26)(r9) | |
1182 | std r27, VCPU_GPR(R27)(r9) | |
1183 | std r28, VCPU_GPR(R28)(r9) | |
1184 | std r29, VCPU_GPR(R29)(r9) | |
1185 | std r30, VCPU_GPR(R30)(r9) | |
1186 | std r31, VCPU_GPR(R31)(r9) | |
1187 | ||
1188 | /* Save SPRGs */ | |
1189 | mfspr r3, SPRN_SPRG0 | |
1190 | mfspr r4, SPRN_SPRG1 | |
1191 | mfspr r5, SPRN_SPRG2 | |
1192 | mfspr r6, SPRN_SPRG3 | |
1193 | std r3, VCPU_SPRG0(r9) | |
1194 | std r4, VCPU_SPRG1(r9) | |
1195 | std r5, VCPU_SPRG2(r9) | |
1196 | std r6, VCPU_SPRG3(r9) | |
1197 | ||
1198 | /* save FP state */ | |
1199 | mr r3, r9 | |
1200 | bl kvmppc_save_fp | |
de56a948 | 1201 | |
0a8eccef PM |
1202 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1203 | BEGIN_FTR_SECTION | |
1204 | b 2f | |
1205 | END_FTR_SECTION_IFCLR(CPU_FTR_TM) | |
1206 | /* Turn on TM. */ | |
1207 | mfmsr r8 | |
1208 | li r0, 1 | |
1209 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | |
1210 | mtmsrd r8 | |
1211 | ||
1212 | ld r5, VCPU_MSR(r9) | |
1213 | rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 | |
1214 | beq 1f /* TM not active in guest. */ | |
1215 | ||
1216 | li r3, TM_CAUSE_KVM_RESCHED | |
1217 | ||
1218 | /* Clear the MSR RI since r1, r13 are all going to be foobar. */ | |
1219 | li r5, 0 | |
1220 | mtmsrd r5, 1 | |
1221 | ||
1222 | /* All GPRs are volatile at this point. */ | |
1223 | TRECLAIM(R3) | |
1224 | ||
1225 | /* Temporarily store r13 and r9 so we have some regs to play with */ | |
1226 | SET_SCRATCH0(r13) | |
1227 | GET_PACA(r13) | |
1228 | std r9, PACATMSCRATCH(r13) | |
1229 | ld r9, HSTATE_KVM_VCPU(r13) | |
1230 | ||
1231 | /* Get a few more GPRs free. */ | |
1232 | std r29, VCPU_GPRS_TM(29)(r9) | |
1233 | std r30, VCPU_GPRS_TM(30)(r9) | |
1234 | std r31, VCPU_GPRS_TM(31)(r9) | |
1235 | ||
	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
1242 | ||
1243 | /* Save all but r9, r13 & r29-r31 */ | |
1244 | reg = 0 | |
1245 | .rept 29 | |
1246 | .if (reg != 9) && (reg != 13) | |
1247 | std reg, VCPU_GPRS_TM(reg)(r9) | |
1248 | .endif | |
1249 | reg = reg + 1 | |
1250 | .endr | |
1251 | /* ... now save r13 */ | |
1252 | GET_SCRATCH0(r4) | |
1253 | std r4, VCPU_GPRS_TM(13)(r9) | |
1254 | /* ... and save r9 */ | |
1255 | ld r4, PACATMSCRATCH(r13) | |
1256 | std r4, VCPU_GPRS_TM(9)(r9) | |
1257 | ||
1258 | /* Reload stack pointer and TOC. */ | |
1259 | ld r1, HSTATE_HOST_R1(r13) | |
1260 | ld r2, PACATOC(r13) | |
1261 | ||
1262 | /* Set MSR RI now we have r1 and r13 back. */ | |
1263 | li r5, MSR_RI | |
1264 | mtmsrd r5, 1 | |
1265 | ||
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
1279 | ||
1280 | /* Restore r12 as trap number. */ | |
1281 | lwz r12, VCPU_TRAP(r9) | |
1282 | ||
1283 | /* Save FP/VSX. */ | |
1284 | addi r3, r9, VCPU_FPRS_TM | |
9bf163f8 | 1285 | bl store_fp_state |
0a8eccef | 1286 | addi r3, r9, VCPU_VRS_TM |
9bf163f8 | 1287 | bl store_vr_state |
0a8eccef PM |
1288 | mfspr r6, SPRN_VRSAVE |
1289 | stw r6, VCPU_VRSAVE_TM(r9) | |
1290 | 1: | |
1291 | /* | |
1292 | * We need to save these SPRs after the treclaim so that the software | |
1293 | * error code is recorded correctly in the TEXASR. Also the user may | |
1294 | * change these outside of a transaction, so they must always be | |
1295 | * context switched. | |
1296 | */ | |
1297 | mfspr r5, SPRN_TFHAR | |
1298 | mfspr r6, SPRN_TFIAR | |
1299 | mfspr r7, SPRN_TEXASR | |
1300 | std r5, VCPU_TFHAR(r9) | |
1301 | std r6, VCPU_TFIAR(r9) | |
1302 | std r7, VCPU_TEXASR(r9) | |
1303 | 2: | |
1304 | #endif | |
1305 | ||
e0b7ec05 PM |
1306 | /* Increment yield count if they have a VPA */ |
1307 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ | |
1308 | cmpdi r8, 0 | |
1309 | beq 25f | |
0865a583 AG |
1310 | li r4, LPPACA_YIELDCOUNT |
1311 | LWZX_BE r3, r8, r4 | |
e0b7ec05 | 1312 | addi r3, r3, 1 |
0865a583 | 1313 | STWX_BE r3, r8, r4 |
e0b7ec05 PM |
1314 | li r3, 1 |
1315 | stb r3, VCPU_VPA_DIRTY(r9) | |
1316 | 25: | |
1317 | /* Save PMU registers if requested */ | |
1318 | /* r8 and cr0.eq are live here */ | |
9bc01a9b PM |
1319 | BEGIN_FTR_SECTION |
1320 | /* | |
1321 | * POWER8 seems to have a hardware bug where setting | |
1322 | * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] | |
1323 | * when some counters are already negative doesn't seem | |
1324 | * to cause a performance monitor alert (and hence interrupt). | |
1325 | * The effect of this is that when saving the PMU state, | |
1326 | * if there is no PMU alert pending when we read MMCR0 | |
1327 | * before freezing the counters, but one becomes pending | |
1328 | * before we read the counters, we lose it. | |
1329 | * To work around this, we need a way to freeze the counters | |
1330 | * before reading MMCR0. Normally, freezing the counters | |
1331 | * is done by writing MMCR0 (to set MMCR0[FC]) which | |
1332 | * unavoidably writes MMCR0[PMA0] as well. On POWER8, | |
1333 | * we can also freeze the counters using MMCR2, by writing | |
1334 | * 1s to all the counter freeze condition bits (there are | |
1335 | * 9 bits each for 6 counters). | |
1336 | */ | |
1337 | li r3, -1 /* set all freeze bits */ | |
1338 | clrrdi r3, r3, 10 | |
1339 | mfspr r10, SPRN_MMCR2 | |
1340 | mtspr SPRN_MMCR2, r3 | |
1341 | isync | |
1342 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
e0b7ec05 PM |
1343 | li r3, 1 |
1344 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | |
1345 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ | |
1346 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | |
1347 | mfspr r6, SPRN_MMCRA | |
c17b98cf | 1348 | /* Clear MMCRA in order to disable SDAR updates */ |
e0b7ec05 PM |
1349 | li r7, 0 |
1350 | mtspr SPRN_MMCRA, r7 | |
e0b7ec05 PM |
1351 | isync |
1352 | beq 21f /* if no VPA, save PMU stuff anyway */ | |
1353 | lbz r7, LPPACA_PMCINUSE(r8) | |
1354 | cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */ | |
1355 | bne 21f | |
1356 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ | |
1357 | b 22f | |
1358 | 21: mfspr r5, SPRN_MMCR1 | |
1359 | mfspr r7, SPRN_SIAR | |
1360 | mfspr r8, SPRN_SDAR | |
1361 | std r4, VCPU_MMCR(r9) | |
1362 | std r5, VCPU_MMCR + 8(r9) | |
1363 | std r6, VCPU_MMCR + 16(r9) | |
9bc01a9b PM |
1364 | BEGIN_FTR_SECTION |
1365 | std r10, VCPU_MMCR + 24(r9) | |
1366 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
e0b7ec05 PM |
1367 | std r7, VCPU_SIAR(r9) |
1368 | std r8, VCPU_SDAR(r9) | |
1369 | mfspr r3, SPRN_PMC1 | |
1370 | mfspr r4, SPRN_PMC2 | |
1371 | mfspr r5, SPRN_PMC3 | |
1372 | mfspr r6, SPRN_PMC4 | |
1373 | mfspr r7, SPRN_PMC5 | |
1374 | mfspr r8, SPRN_PMC6 | |
e0b7ec05 PM |
1375 | stw r3, VCPU_PMC(r9) |
1376 | stw r4, VCPU_PMC + 4(r9) | |
1377 | stw r5, VCPU_PMC + 8(r9) | |
1378 | stw r6, VCPU_PMC + 12(r9) | |
1379 | stw r7, VCPU_PMC + 16(r9) | |
1380 | stw r8, VCPU_PMC + 20(r9) | |
b005255e | 1381 | BEGIN_FTR_SECTION |
b005255e MN |
1382 | mfspr r5, SPRN_SIER |
1383 | mfspr r6, SPRN_SPMC1 | |
1384 | mfspr r7, SPRN_SPMC2 | |
1385 | mfspr r8, SPRN_MMCRS | |
b005255e MN |
1386 | std r5, VCPU_SIER(r9) |
1387 | stw r6, VCPU_PMC + 24(r9) | |
1388 | stw r7, VCPU_PMC + 28(r9) | |
1389 | std r8, VCPU_MMCR + 32(r9) | |
1390 | lis r4, 0x8000 | |
1391 | mtspr SPRN_MMCRS, r4 | |
1392 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
e0b7ec05 | 1393 | 22: |
de56a948 PM |
1394 | /* Clear out SLB */ |
1395 | li r5,0 | |
1396 | slbmte r5,r5 | |
1397 | slbia | |
1398 | ptesync | |
1399 | ||
e0b7ec05 | 1400 | hdec_soon: /* r12 = trap, r13 = paca */ |
9e368f29 | 1401 | /* |
c17b98cf | 1402 | * POWER7/POWER8 guest -> host partition switch code. |
9e368f29 PM |
1403 | * We don't have to lock against tlbies but we do |
1404 | * have to coordinate the hardware threads. | |
1405 | */ | |
371fefd6 PM |
1406 | /* Increment the threads-exiting-guest count in the 0xff00 |
1407 | bits of vcore->entry_exit_count */ | |
371fefd6 PM |
1408 | ld r5,HSTATE_KVM_VCORE(r13) |
1409 | addi r6,r5,VCORE_ENTRY_EXIT | |
1410 | 41: lwarx r3,0,r6 | |
1411 | addi r0,r3,0x100 | |
1412 | stwcx. r0,0,r6 | |
1413 | bne 41b | |
f019b7ad | 1414 | isync /* order stwcx. vs. reading napping_threads */ |
371fefd6 PM |
1415 | |
1416 | /* | |
1417 | * At this point we have an interrupt that we have to pass | |
1418 | * up to the kernel or qemu; we can't handle it in real mode. | |
1419 | * Thus we have to do a partition switch, so we have to | |
1420 | * collect the other threads, if we are the first thread | |
1421 | * to take an interrupt. To do this, we set the HDEC to 0, | |
1422 | * which causes an HDEC interrupt in all threads within 2ns | |
1423 | * because the HDEC register is shared between all 4 threads. | |
1424 | * However, we don't need to bother if this is an HDEC | |
1425 | * interrupt, since the other threads will already be on their | |
1426 | * way here in that case. | |
1427 | */ | |
19ccb76a PM |
1428 | cmpwi r3,0x100 /* Are we the first here? */ |
1429 | bge 43f | |
371fefd6 PM |
1430 | cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER |
1431 | beq 40f | |
371fefd6 PM |
1432 | li r0,0 |
1433 | mtspr SPRN_HDEC,r0 | |
1434 | 40: | |
19ccb76a PM |
1435 | /* |
1436 | * Send an IPI to any napping threads, since an HDEC interrupt | |
1437 | * doesn't wake CPUs up from nap. | |
1438 | */ | |
1439 | lwz r3,VCORE_NAPPING_THREADS(r5) | |
e0b7ec05 | 1440 | lbz r4,HSTATE_PTID(r13) |
19ccb76a | 1441 | li r0,1 |
2f584a14 | 1442 | sld r0,r0,r4 |
19ccb76a PM |
1443 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ |
1444 | beq 43f | |
f019b7ad PM |
1445 | /* Order entry/exit update vs. IPIs */ |
1446 | sync | |
19ccb76a PM |
1447 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ |
1448 | subf r6,r4,r13 | |
1449 | 42: andi. r0,r3,1 | |
1450 | beq 44f | |
1451 | ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ | |
1452 | li r0,IPI_PRIORITY | |
54695c30 | 1453 | li r7,XICS_MFRR |
19ccb76a PM |
1454 | stbcix r0,r7,r8 /* trigger the IPI */ |
1455 | 44: srdi. r3,r3,1 | |
1456 | addi r6,r6,PACA_SIZE | |
1457 | bne 42b | |
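
	/*
	 * The loop above walks the napping_threads bitmap one bit at a
	 * time, writing IPI_PRIORITY to each napping thread's XICS MFRR
	 * to wake it.  Computing thread 0's paca as r13 - ptid*PACA_SIZE
	 * relies on the pacas for the threads of a core being contiguous.
	 */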

secondary_too_late:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
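
	/*
	 * Only the SLB_NUM_BOLTED entries from the host SLB shadow are
	 * reloaded here; the whole SLB was cleared on the exit path, and
	 * any other host entries will be faulted back in on demand.
	 */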
1544 | ||
44a3add8 PM |
1545 | /* Unset guest mode */ |
1546 | li r0, KVM_GUEST_MODE_NONE | |
1547 | stb r0, HSTATE_IN_GUEST(r13) | |
1548 | ||
218309b7 PM |
1549 | ld r0, 112+PPC_LR_STKOFF(r1) |
1550 | addi r1, r1, 112 | |
1551 | mtlr r0 | |
1552 | blr | |
b4072df4 | 1553 | |
697d3899 PM |
1554 | /* |
1555 | * Check whether an HDSI is an HPTE not found fault or something else. | |
1556 | * If it is an HPTE not found fault that is due to the guest accessing | |
1557 | * a page that they have mapped but which we have paged out, then | |
1558 | * we continue on with the guest exit path. In all other cases, | |
1559 | * reflect the HDSI to the guest as a DSI. | |
1560 | */ | |
1561 | kvmppc_hdsi: | |
1562 | mfspr r4, SPRN_HDAR | |
1563 | mfspr r6, SPRN_HDSISR | |
4cf302bc PM |
1564 | /* HPTE not found fault or protection fault? */ |
1565 | andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h | |
697d3899 PM |
1566 | beq 1f /* if not, send it to the guest */ |
1567 | andi. r0, r11, MSR_DR /* data relocation enabled? */ | |
1568 | beq 3f | |
1569 | clrrdi r0, r4, 28 | |
c75df6f9 | 1570 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
697d3899 PM |
1571 | bne 1f /* if no SLB entry found */ |
1572 | 4: std r4, VCPU_FAULT_DAR(r9) | |
1573 | stw r6, VCPU_FAULT_DSISR(r9) | |
1574 | ||
1575 | /* Search the hash table. */ | |
1576 | mr r3, r9 /* vcpu pointer */ | |
342d3db7 | 1577 | li r7, 1 /* data fault */ |
b1576fec | 1578 | bl kvmppc_hpte_hv_fault |
697d3899 PM |
1579 | ld r9, HSTATE_KVM_VCPU(r13) |
1580 | ld r10, VCPU_PC(r9) | |
1581 | ld r11, VCPU_MSR(r9) | |
1582 | li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE | |
1583 | cmpdi r3, 0 /* retry the instruction */ | |
1584 | beq 6f | |
1585 | cmpdi r3, -1 /* handle in kernel mode */ | |
b4072df4 | 1586 | beq guest_exit_cont |
697d3899 PM |
1587 | cmpdi r3, -2 /* MMIO emulation; need instr word */ |
1588 | beq 2f | |
1589 | ||
1590 | /* Synthesize a DSI for the guest */ | |
1591 | ld r4, VCPU_FAULT_DAR(r9) | |
1592 | mr r6, r3 | |
1593 | 1: mtspr SPRN_DAR, r4 | |
1594 | mtspr SPRN_DSISR, r6 | |
1595 | mtspr SPRN_SRR0, r10 | |
1596 | mtspr SPRN_SRR1, r11 | |
1597 | li r10, BOOK3S_INTERRUPT_DATA_STORAGE | |
e4e38121 | 1598 | bl kvmppc_msr_interrupt |
b4072df4 | 1599 | fast_interrupt_c_return: |
697d3899 PM |
1600 | 6: ld r7, VCPU_CTR(r9) |
1601 | lwz r8, VCPU_XER(r9) | |
1602 | mtctr r7 | |
1603 | mtxer r8 | |
1604 | mr r4, r9 | |
1605 | b fast_guest_return | |
1606 | ||
1607 | 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ | |
1608 | ld r5, KVM_VRMA_SLB_V(r5) | |
1609 | b 4b | |
1610 | ||
1611 | /* If this is for emulated MMIO, load the instruction word */ | |
1612 | 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ | |
1613 | ||
1614 | /* Set guest mode to 'jump over instruction' so if lwz faults | |
1615 | * we'll just continue at the next IP. */ | |
1616 | li r0, KVM_GUEST_MODE_SKIP | |
1617 | stb r0, HSTATE_IN_GUEST(r13) | |
1618 | ||
1619 | /* Do the access with MSR:DR enabled */ | |
1620 | mfmsr r3 | |
1621 | ori r4, r3, MSR_DR /* Enable paging for data */ | |
1622 | mtmsrd r4 | |
1623 | lwz r8, 0(r10) | |
1624 | mtmsrd r3 | |
1625 | ||
1626 | /* Store the result */ | |
1627 | stw r8, VCPU_LAST_INST(r9) | |
1628 | ||
1629 | /* Unset guest mode. */ | |
44a3add8 | 1630 | li r0, KVM_GUEST_MODE_HOST_HV |
697d3899 | 1631 | stb r0, HSTATE_IN_GUEST(r13) |
b4072df4 | 1632 | b guest_exit_cont |
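/*
 * The instruction-word fetch above works because HSTATE_IN_GUEST is set
 * to KVM_GUEST_MODE_SKIP first: if the lwz faults, the interrupt
 * handler steps over it instead of treating it as a host fault.  As a
 * sketch (the hstate/guest_pc names and mtmsrd() helper are
 * illustrative):
 *
 *	u32 inst = KVM_INST_FETCH_FAILED;
 *
 *	hstate->in_guest = KVM_GUEST_MODE_SKIP;
 *	mtmsrd(old_msr | MSR_DR);		turn on data relocation
 *	inst = *(u32 *)guest_pc;		may fault, harmlessly
 *	mtmsrd(old_msr);
 *	vcpu->arch.last_inst = inst;
 *	hstate->in_guest = KVM_GUEST_MODE_HOST_HV;
 */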
de56a948 | 1633 | |
342d3db7 PM |
1634 | /* |
1635 | * Similarly for an HISI, reflect it to the guest as an ISI unless | |
1636 | * it is an HPTE not found fault for a page that we have paged out. | |
1637 | */ | |
1638 | kvmppc_hisi: | |
1639 | andis. r0, r11, SRR1_ISI_NOPT@h | |
1640 | beq 1f | |
1641 | andi. r0, r11, MSR_IR /* instruction relocation enabled? */ | |
1642 | beq 3f | |
1643 | clrrdi r0, r10, 28 | |
c75df6f9 | 1644 | PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ |
342d3db7 PM |
1645 | bne 1f /* if no SLB entry found */ |
1646 | 4: | |
1647 | /* Search the hash table. */ | |
1648 | mr r3, r9 /* vcpu pointer */ | |
1649 | mr r4, r10 | |
1650 | mr r6, r11 | |
1651 | li r7, 0 /* instruction fault */ | |
b1576fec | 1652 | bl kvmppc_hpte_hv_fault |
342d3db7 PM |
1653 | ld r9, HSTATE_KVM_VCPU(r13) |
1654 | ld r10, VCPU_PC(r9) | |
1655 | ld r11, VCPU_MSR(r9) | |
1656 | li r12, BOOK3S_INTERRUPT_H_INST_STORAGE | |
1657 | cmpdi r3, 0 /* retry the instruction */ | |
b4072df4 | 1658 | beq fast_interrupt_c_return |
342d3db7 | 1659 | cmpdi r3, -1 /* handle in kernel mode */ |
b4072df4 | 1660 | beq guest_exit_cont |
342d3db7 PM |
1661 | |
1662 | /* Synthesize an ISI for the guest */ | |
1663 | mr r11, r3 | |
1664 | 1: mtspr SPRN_SRR0, r10 | |
1665 | mtspr SPRN_SRR1, r11 | |
1666 | li r10, BOOK3S_INTERRUPT_INST_STORAGE | |
e4e38121 | 1667 | bl kvmppc_msr_interrupt |
b4072df4 | 1668 | b fast_interrupt_c_return |
342d3db7 PM |
1669 | |
1670 | 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ | |
1671 | ld r5, KVM_VRMA_SLB_V(r6) | |
1672 | b 4b | |
1673 | ||
a8606e20 PM |
1674 | /* |
1675 | * Try to handle an hcall in real mode. | |
1676 | * Returns to the guest if we handle it, or continues on up to | |
1677 | * the kernel if we can't (i.e. if we don't have a handler for | |
1678 | * it, or if the handler returns H_TOO_HARD). | |
1679 | */ | |
1680 | .globl hcall_try_real_mode | |
1681 | hcall_try_real_mode: | |
c75df6f9 | 1682 | ld r3,VCPU_GPR(R3)(r9) |
a8606e20 | 1683 | andi. r0,r11,MSR_PR |
27025a60 LPF |
1684 | /* sc 1 from userspace - reflect to guest syscall */ |
1685 | bne sc_1_fast_return | |
a8606e20 PM |
1686 | clrrdi r3,r3,2 |
1687 | cmpldi r3,hcall_real_table_end - hcall_real_table | |
b4072df4 | 1688 | bge guest_exit_cont |
699a0ea0 PM |
1689 | /* See if this hcall is enabled for in-kernel handling */ |
1690 | ld r4, VCPU_KVM(r9) | |
1691 | srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */ | |
1692 | sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */ | |
1693 | add r4, r4, r0 | |
1694 | ld r0, KVM_ENABLED_HCALLS(r4) | |
1695 | rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */ | |
1696 | srd r0, r0, r4 | |
1697 | andi. r0, r0, 1 | |
1698 | beq guest_exit_cont | |
1699 | /* Get pointer to handler, if any, and call it */ | |
a8606e20 | 1700 | LOAD_REG_ADDR(r4, hcall_real_table) |
4baa1d87 | 1701 | lwax r3,r3,r4 |
a8606e20 | 1702 | cmpwi r3,0 |
b4072df4 | 1703 | beq guest_exit_cont |
05a308c7 AB |
1704 | add r12,r3,r4 |
1705 | mtctr r12 | |
a8606e20 | 1706 | mr r3,r9 /* get vcpu pointer */ |
c75df6f9 | 1707 | ld r4,VCPU_GPR(R4)(r9) |
a8606e20 PM |
1708 | bctrl |
1709 | cmpdi r3,H_TOO_HARD | |
1710 | beq hcall_real_fallback | |
1711 | ld r4,HSTATE_KVM_VCPU(r13) | |
c75df6f9 | 1712 | std r3,VCPU_GPR(R3)(r4) |
a8606e20 PM |
1713 | ld r10,VCPU_PC(r4) |
1714 | ld r11,VCPU_MSR(r4) | |
1715 | b fast_guest_return | |
1716 | ||
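/*
 * The dispatch above, in C terms (a sketch; exit_to_host() stands in
 * for the branch to guest_exit_cont).  hcall numbers are multiples of
 * 4, so the number itself is the byte offset into a table of 32-bit
 * offsets from hcall_real_table; storing offsets rather than absolute
 * addresses keeps the table usable at its real-mode address:
 *
 *	unsigned int idx = req / 4;
 *	long (*handler)(struct kvm_vcpu *vcpu, ...);
 *	s32 off;
 *
 *	if (idx >= ARRAY_SIZE(hcall_real_table))
 *		return exit_to_host();
 *	if (!(kvm->arch.enabled_hcalls[idx / 64] & (1UL << (idx % 64))))
 *		return exit_to_host();
 *	off = hcall_real_table[idx];
 *	if (!off)
 *		return exit_to_host();
 *	handler = (void *)hcall_real_table + off;
 *	ret = handler(vcpu, kvmppc_get_gpr(vcpu, 4), ...);
 *	if (ret == H_TOO_HARD)
 *		return exit_to_host();
 *	kvmppc_set_gpr(vcpu, 3, ret);
 *	return resume_guest();
 */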
27025a60 LPF |
1717 | sc_1_fast_return: |
1718 | mtspr SPRN_SRR0,r10 | |
1719 | mtspr SPRN_SRR1,r11 | |
1720 | li r10, BOOK3S_INTERRUPT_SYSCALL | |
e4e38121 | 1721 | bl kvmppc_msr_interrupt |
27025a60 LPF |
1722 | mr r4,r9 |
1723 | b fast_guest_return | |
1724 | ||
a8606e20 PM |
1725 | /* We've attempted a real mode hcall, but it has been punted back | |
1726 | * to userspace. We need to restore some clobbered volatiles | |
1727 | * before resuming the pass-it-to-qemu path */ | |
1728 | hcall_real_fallback: | |
1729 | li r12,BOOK3S_INTERRUPT_SYSCALL | |
1730 | ld r9, HSTATE_KVM_VCPU(r13) | |
a8606e20 | 1731 | |
b4072df4 | 1732 | b guest_exit_cont |
a8606e20 PM |
1733 | |
1734 | .globl hcall_real_table | |
1735 | hcall_real_table: | |
1736 | .long 0 /* 0 - unused */ | |
c1fb0194 AB |
1737 | .long DOTSYM(kvmppc_h_remove) - hcall_real_table |
1738 | .long DOTSYM(kvmppc_h_enter) - hcall_real_table | |
1739 | .long DOTSYM(kvmppc_h_read) - hcall_real_table | |
a8606e20 PM |
1740 | .long 0 /* 0x10 - H_CLEAR_MOD */ |
1741 | .long 0 /* 0x14 - H_CLEAR_REF */ | |
c1fb0194 AB |
1742 | .long DOTSYM(kvmppc_h_protect) - hcall_real_table |
1743 | .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table | |
1744 | .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table | |
a8606e20 | 1745 | .long 0 /* 0x24 - H_SET_SPRG0 */ |
c1fb0194 | 1746 | .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table |
a8606e20 PM |
1747 | .long 0 /* 0x2c */ |
1748 | .long 0 /* 0x30 */ | |
1749 | .long 0 /* 0x34 */ | |
1750 | .long 0 /* 0x38 */ | |
1751 | .long 0 /* 0x3c */ | |
1752 | .long 0 /* 0x40 */ | |
1753 | .long 0 /* 0x44 */ | |
1754 | .long 0 /* 0x48 */ | |
1755 | .long 0 /* 0x4c */ | |
1756 | .long 0 /* 0x50 */ | |
1757 | .long 0 /* 0x54 */ | |
1758 | .long 0 /* 0x58 */ | |
1759 | .long 0 /* 0x5c */ | |
1760 | .long 0 /* 0x60 */ | |
e7d26f28 | 1761 | #ifdef CONFIG_KVM_XICS |
c1fb0194 AB |
1762 | .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table |
1763 | .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table | |
1764 | .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table | |
e7d26f28 | 1765 | .long 0 /* 0x70 - H_IPOLL */ |
c1fb0194 | 1766 | .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table |
e7d26f28 BH |
1767 | #else |
1768 | .long 0 /* 0x64 - H_EOI */ | |
1769 | .long 0 /* 0x68 - H_CPPR */ | |
1770 | .long 0 /* 0x6c - H_IPI */ | |
1771 | .long 0 /* 0x70 - H_IPOLL */ | |
1772 | .long 0 /* 0x74 - H_XIRR */ | |
1773 | #endif | |
a8606e20 PM |
1774 | .long 0 /* 0x78 */ |
1775 | .long 0 /* 0x7c */ | |
1776 | .long 0 /* 0x80 */ | |
1777 | .long 0 /* 0x84 */ | |
1778 | .long 0 /* 0x88 */ | |
1779 | .long 0 /* 0x8c */ | |
1780 | .long 0 /* 0x90 */ | |
1781 | .long 0 /* 0x94 */ | |
1782 | .long 0 /* 0x98 */ | |
1783 | .long 0 /* 0x9c */ | |
1784 | .long 0 /* 0xa0 */ | |
1785 | .long 0 /* 0xa4 */ | |
1786 | .long 0 /* 0xa8 */ | |
1787 | .long 0 /* 0xac */ | |
1788 | .long 0 /* 0xb0 */ | |
1789 | .long 0 /* 0xb4 */ | |
1790 | .long 0 /* 0xb8 */ | |
1791 | .long 0 /* 0xbc */ | |
1792 | .long 0 /* 0xc0 */ | |
1793 | .long 0 /* 0xc4 */ | |
1794 | .long 0 /* 0xc8 */ | |
1795 | .long 0 /* 0xcc */ | |
1796 | .long 0 /* 0xd0 */ | |
1797 | .long 0 /* 0xd4 */ | |
1798 | .long 0 /* 0xd8 */ | |
1799 | .long 0 /* 0xdc */ | |
c1fb0194 | 1800 | .long DOTSYM(kvmppc_h_cede) - hcall_real_table |
a8606e20 PM |
1801 | .long 0 /* 0xe4 */ |
1802 | .long 0 /* 0xe8 */ | |
1803 | .long 0 /* 0xec */ | |
1804 | .long 0 /* 0xf0 */ | |
1805 | .long 0 /* 0xf4 */ | |
1806 | .long 0 /* 0xf8 */ | |
1807 | .long 0 /* 0xfc */ | |
1808 | .long 0 /* 0x100 */ | |
1809 | .long 0 /* 0x104 */ | |
1810 | .long 0 /* 0x108 */ | |
1811 | .long 0 /* 0x10c */ | |
1812 | .long 0 /* 0x110 */ | |
1813 | .long 0 /* 0x114 */ | |
1814 | .long 0 /* 0x118 */ | |
1815 | .long 0 /* 0x11c */ | |
1816 | .long 0 /* 0x120 */ | |
c1fb0194 | 1817 | .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table |
8563bf52 PM |
1818 | .long 0 /* 0x128 */ |
1819 | .long 0 /* 0x12c */ | |
1820 | .long 0 /* 0x130 */ | |
c1fb0194 | 1821 | .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table |
ae2113a4 | 1822 | .globl hcall_real_table_end |
a8606e20 PM |
1823 | hcall_real_table_end: |
1824 | ||
de56a948 PM |
1825 | ignore_hdec: |
1826 | mr r4,r9 | |
1827 | b fast_guest_return | |
1828 | ||
8563bf52 PM |
1829 | _GLOBAL(kvmppc_h_set_xdabr) |
1830 | andi. r0, r5, DABRX_USER | DABRX_KERNEL | |
1831 | beq 6f | |
1832 | li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI | |
1833 | andc. r0, r5, r0 | |
1834 | beq 3f | |
1835 | 6: li r3, H_PARAMETER | |
1836 | blr | |
1837 | ||
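/*
 * The validation above, in C (a sketch): H_SET_XDABR requires at least
 * one of the user/kernel match bits and allows nothing beyond
 * user/kernel/breakpoint-translation:
 *
 *	if (!(dabrx & (DABRX_USER | DABRX_KERNEL)))
 *		return H_PARAMETER;
 *	if (dabrx & ~(DABRX_USER | DABRX_KERNEL | DABRX_BTI))
 *		return H_PARAMETER;
 *
 * and then it falls through to the common H_SET_DABR path at label 3.
 */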
a8606e20 | 1838 | _GLOBAL(kvmppc_h_set_dabr) |
8563bf52 PM |
1839 | li r5, DABRX_USER | DABRX_KERNEL |
1840 | 3: | |
eee7ff9d MN |
1841 | BEGIN_FTR_SECTION |
1842 | b 2f | |
1843 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
a8606e20 | 1844 | std r4,VCPU_DABR(r3) |
8563bf52 PM |
1845 | stw r5, VCPU_DABRX(r3) |
1846 | mtspr SPRN_DABRX, r5 | |
8943633c PM |
1847 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
1848 | 1: mtspr SPRN_DABR,r4 | |
1849 | mfspr r5, SPRN_DABR | |
1850 | cmpd r4, r5 | |
1851 | bne 1b | |
1852 | isync | |
a8606e20 PM |
1853 | li r3,0 |
1854 | blr | |
1855 | ||
8563bf52 PM |
1856 | /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ |
1857 | 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW | |
1858 | rlwimi r5, r4, 2, DAWRX_WT | |
1859 | clrrdi r4, r4, 3 | |
1860 | std r4, VCPU_DAWR(r3) | |
1861 | std r5, VCPU_DAWRX(r3) | |
1862 | mtspr SPRN_DAWR, r4 | |
1863 | mtspr SPRN_DAWRX, r5 | |
1864 | li r3, 0 | |
a8606e20 PM |
1865 | blr |
1866 | ||
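/*
 * The rlwimi pair above moves the DABR control bits into DAWRX format;
 * rlwimi is rotate-and-insert, which for these bit positions is just a
 * shift.  As a C sketch:
 *
 *	dawr  = dabr & ~7UL;		doubleword-aligned address
 *	dawrx = (dabrx & ~(DAWRX_DR | DAWRX_DW | DAWRX_WT))
 *		| ((dabr << 5) & (DAWRX_DR | DAWRX_DW))
 *		| ((dabr << 2) & DAWRX_WT);
 *
 * i.e. DABR_DATA_READ/DABR_DATA_WRITE become DAWRX_DR/DAWRX_DW and
 * DABR_TRANSLATION becomes DAWRX_WT.
 */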
19ccb76a PM |
1867 | _GLOBAL(kvmppc_h_cede) |
1868 | ori r11,r11,MSR_EE | |
1869 | std r11,VCPU_MSR(r3) | |
1870 | li r0,1 | |
1871 | stb r0,VCPU_CEDED(r3) | |
1872 | sync /* order setting ceded vs. testing prodded */ | |
1873 | lbz r5,VCPU_PRODDED(r3) | |
1874 | cmpwi r5,0 | |
04f995a5 | 1875 | bne kvm_cede_prodded |
19ccb76a PM |
1876 | li r0,0 /* set trap to 0 to say hcall is handled */ |
1877 | stw r0,VCPU_TRAP(r3) | |
1878 | li r0,H_SUCCESS | |
c75df6f9 | 1879 | std r0,VCPU_GPR(R3)(r3) |
19ccb76a PM |
1880 | |
1881 | /* | |
1882 | * Set our bit in the bitmask of napping threads unless all the | |
1883 | * other threads are already napping, in which case we send this | |
1884 | * up to the host. | |
1885 | */ | |
1886 | ld r5,HSTATE_KVM_VCORE(r13) | |
e0b7ec05 | 1887 | lbz r6,HSTATE_PTID(r13) |
19ccb76a PM |
1888 | lwz r8,VCORE_ENTRY_EXIT(r5) |
1889 | clrldi r8,r8,56 | |
1890 | li r0,1 | |
1891 | sld r0,r0,r6 | |
1892 | addi r6,r5,VCORE_NAPPING_THREADS | |
1893 | 31: lwarx r4,0,r6 | |
1894 | or r4,r4,r0 | |
c75df6f9 | 1895 | PPC_POPCNTW(R7,R4) |
19ccb76a | 1896 | cmpw r7,r8 |
04f995a5 | 1897 | bge kvm_cede_exit |
19ccb76a PM |
1898 | stwcx. r4,0,r6 |
1899 | bne 31b | |
f019b7ad PM |
1900 | /* order napping_threads update vs testing entry_exit_count */ |
1901 | isync | |
e0b7ec05 | 1902 | li r0,NAPPING_CEDE |
19ccb76a | 1903 | stb r0,HSTATE_NAPPING(r13) |
19ccb76a PM |
1904 | lwz r7,VCORE_ENTRY_EXIT(r5) |
1905 | cmpwi r7,0x100 | |
1906 | bge 33f /* another thread already exiting */ | |
1907 | ||
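/*
 * The lwarx/stwcx. loop above, in C (a sketch; cede_exit() stands in
 * for the branch to kvm_cede_exit).  The low byte of entry_exit_count
 * is the number of threads that have entered the guest; if setting our
 * bit would leave all of them napping, control goes to the host:
 *
 *	u32 entered = vc->entry_exit_count & 0xff;
 *	u32 old, new;
 *
 *	do {
 *		old = vc->napping_threads;
 *		new = old | (1u << ptid);
 *		if (hweight32(new) >= entered)
 *			return cede_exit();
 *	} while (cmpxchg(&vc->napping_threads, old, new) != old);
 */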
1908 | /* | |
1909 | * Although not specifically required by the architecture, POWER7 | |
1910 | * preserves the following registers in nap mode, even if an SMT mode | |
1911 | * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, | |
1912 | * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. | |
1913 | */ | |
1914 | /* Save non-volatile GPRs */ | |
c75df6f9 MN |
1915 | std r14, VCPU_GPR(R14)(r3) |
1916 | std r15, VCPU_GPR(R15)(r3) | |
1917 | std r16, VCPU_GPR(R16)(r3) | |
1918 | std r17, VCPU_GPR(R17)(r3) | |
1919 | std r18, VCPU_GPR(R18)(r3) | |
1920 | std r19, VCPU_GPR(R19)(r3) | |
1921 | std r20, VCPU_GPR(R20)(r3) | |
1922 | std r21, VCPU_GPR(R21)(r3) | |
1923 | std r22, VCPU_GPR(R22)(r3) | |
1924 | std r23, VCPU_GPR(R23)(r3) | |
1925 | std r24, VCPU_GPR(R24)(r3) | |
1926 | std r25, VCPU_GPR(R25)(r3) | |
1927 | std r26, VCPU_GPR(R26)(r3) | |
1928 | std r27, VCPU_GPR(R27)(r3) | |
1929 | std r28, VCPU_GPR(R28)(r3) | |
1930 | std r29, VCPU_GPR(R29)(r3) | |
1931 | std r30, VCPU_GPR(R30)(r3) | |
1932 | std r31, VCPU_GPR(R31)(r3) | |
19ccb76a PM |
1933 | |
1934 | /* save FP state */ | |
595e4f7e | 1935 | bl kvmppc_save_fp |
19ccb76a PM |
1936 | |
1937 | /* | |
aa31e843 | 1938 | * Take a nap until a decrementer or external or doorbell interrupt | |
582b910e PM |
1939 | * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the |
1940 | * runlatch bit before napping. | |
19ccb76a | 1941 | */ |
582b910e PM |
1942 | mfspr r2, SPRN_CTRLF |
1943 | clrrdi r2, r2, 1 | |
1944 | mtspr SPRN_CTRLT, r2 | |
1945 | ||
f0888f70 PM |
1946 | li r0,1 |
1947 | stb r0,HSTATE_HWTHREAD_REQ(r13) | |
19ccb76a PM |
1948 | mfspr r5,SPRN_LPCR |
1949 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 | |
aa31e843 PM |
1950 | BEGIN_FTR_SECTION |
1951 | oris r5,r5,LPCR_PECEDP@h | |
1952 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |
19ccb76a PM |
1953 | mtspr SPRN_LPCR,r5 |
1954 | isync | |
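/*
 * The store/ptesync/load/compare sequence below is the pre-nap idiom:
 * reloading the just-stored HSTATE_SCRATCH0 value behind a ptesync,
 * with a (never-taken) dependent branch, ensures all prior stores have
 * performed before the thread naps.
 */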
1955 | li r0, 0 | |
1956 | std r0, HSTATE_SCRATCH0(r13) | |
1957 | ptesync | |
1958 | ld r0, HSTATE_SCRATCH0(r13) | |
1959 | 1: cmpd r0, r0 | |
1960 | bne 1b | |
1961 | nap | |
1962 | b . | |
1963 | ||
e3bbbbfa PM |
1964 | 33: mr r4, r3 |
1965 | li r3, 0 | |
1966 | li r12, 0 | |
1967 | b 34f | |
1968 | ||
19ccb76a | 1969 | kvm_end_cede: |
4619ac88 PM |
1970 | /* get vcpu pointer */ |
1971 | ld r4, HSTATE_KVM_VCPU(r13) | |
1972 | ||
19ccb76a PM |
1973 | /* Woken by external or decrementer interrupt */ |
1974 | ld r1, HSTATE_HOST_R1(r13) | |
19ccb76a | 1975 | |
19ccb76a PM |
1976 | /* load up FP state */ |
1977 | bl kvmppc_load_fp | |
1978 | ||
1979 | /* Load NV GPRS */ | |
c75df6f9 MN |
1980 | ld r14, VCPU_GPR(R14)(r4) |
1981 | ld r15, VCPU_GPR(R15)(r4) | |
1982 | ld r16, VCPU_GPR(R16)(r4) | |
1983 | ld r17, VCPU_GPR(R17)(r4) | |
1984 | ld r18, VCPU_GPR(R18)(r4) | |
1985 | ld r19, VCPU_GPR(R19)(r4) | |
1986 | ld r20, VCPU_GPR(R20)(r4) | |
1987 | ld r21, VCPU_GPR(R21)(r4) | |
1988 | ld r22, VCPU_GPR(R22)(r4) | |
1989 | ld r23, VCPU_GPR(R23)(r4) | |
1990 | ld r24, VCPU_GPR(R24)(r4) | |
1991 | ld r25, VCPU_GPR(R25)(r4) | |
1992 | ld r26, VCPU_GPR(R26)(r4) | |
1993 | ld r27, VCPU_GPR(R27)(r4) | |
1994 | ld r28, VCPU_GPR(R28)(r4) | |
1995 | ld r29, VCPU_GPR(R29)(r4) | |
1996 | ld r30, VCPU_GPR(R30)(r4) | |
1997 | ld r31, VCPU_GPR(R31)(r4) | |
e3bbbbfa PM |
1998 | |
1999 | /* Check the wake reason in SRR1 to see why we got here */ | |
2000 | bl kvmppc_check_wake_reason | |
19ccb76a PM |
2001 | |
2002 | /* clear our bit in vcore->napping_threads */ | |
e3bbbbfa PM |
2003 | 34: ld r5,HSTATE_KVM_VCORE(r13) |
2004 | lbz r7,HSTATE_PTID(r13) | |
19ccb76a | 2005 | li r0,1 |
e3bbbbfa | 2006 | sld r0,r0,r7 |
19ccb76a PM |
2007 | addi r6,r5,VCORE_NAPPING_THREADS |
2008 | 32: lwarx r7,0,r6 | |
2009 | andc r7,r7,r0 | |
2010 | stwcx. r7,0,r6 | |
2011 | bne 32b | |
2012 | li r0,0 | |
2013 | stb r0,HSTATE_NAPPING(r13) | |
2014 | ||
e3bbbbfa PM |
2015 | /* See if the wake reason means we need to exit */ |
2016 | stw r12, VCPU_TRAP(r4) | |
4619ac88 | 2017 | mr r9, r4 |
e3bbbbfa PM |
2018 | cmpdi r3, 0 |
2019 | bgt guest_exit_cont | |
4619ac88 | 2020 | |
19ccb76a PM |
2021 | /* see if any other thread is already exiting */ |
2022 | lwz r0,VCORE_ENTRY_EXIT(r5) | |
2023 | cmpwi r0,0x100 | |
e3bbbbfa | 2024 | bge guest_exit_cont |
19ccb76a | 2025 | |
e3bbbbfa | 2026 | b kvmppc_cede_reentry /* if not go back to guest */ |
19ccb76a PM |
2027 | |
2028 | /* cede when already previously prodded case */ | |
04f995a5 PM |
2029 | kvm_cede_prodded: |
2030 | li r0,0 | |
19ccb76a PM |
2031 | stb r0,VCPU_PRODDED(r3) |
2032 | sync /* order testing prodded vs. clearing ceded */ | |
2033 | stb r0,VCPU_CEDED(r3) | |
2034 | li r3,H_SUCCESS | |
2035 | blr | |
2036 | ||
2037 | /* we've ceded but we want to give control to the host */ | |
04f995a5 | 2038 | kvm_cede_exit: |
4619ac88 | 2039 | b hcall_real_fallback |
19ccb76a | 2040 | |
b4072df4 PM |
2041 | /* Try to handle a machine check in real mode */ |
2042 | machine_check_realmode: | |
2043 | mr r3, r9 /* get vcpu pointer */ | |
b1576fec | 2044 | bl kvmppc_realmode_machine_check |
b4072df4 | 2045 | nop |
74845bc2 | 2046 | cmpdi r3, 0 /* Did we handle the MCE? */ | |
b4072df4 PM |
2047 | ld r9, HSTATE_KVM_VCPU(r13) |
2048 | li r12, BOOK3S_INTERRUPT_MACHINE_CHECK | |
74845bc2 MS |
2049 | /* |
2050 | * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through | |
2051 | * a machine check interrupt (set HSRR0 to 0x200). For handled | |
2052 | * (non-fatal) errors, just go back to guest execution with the | |
2053 | * current HSRR0 instead of exiting the guest. This new approach | |
2054 | * injects a machine check into the guest for a fatal error, causing | |
2055 | * the guest to crash. | |
2056 | * | |
2057 | * The old code used to return to the host for unhandled errors, which | |
2058 | * caused soft lockups in the guest and made it difficult to recover the guest instance. | |
2059 | */ | |
2060 | ld r10, VCPU_PC(r9) | |
2061 | ld r11, VCPU_MSR(r9) | |
2062 | bne 2f /* Continue guest execution. */ | |
b4072df4 PM |
2063 | /* If not, deliver a machine check. SRR0/1 are already set */ |
2064 | li r10, BOOK3S_INTERRUPT_MACHINE_CHECK | |
000a25dd | 2065 | ld r11, VCPU_MSR(r9) |
e4e38121 | 2066 | bl kvmppc_msr_interrupt |
74845bc2 | 2067 | 2: b fast_interrupt_c_return |
b4072df4 | 2068 | |
e3bbbbfa PM |
2069 | /* |
2070 | * Check the reason we woke from nap, and take appropriate action. | |
2071 | * Returns: | |
2072 | * 0 if nothing needs to be done | |
2073 | * 1 if something happened that needs to be handled by the host | |
2074 | * -1 if there was a guest wakeup (IPI) | |
2075 | * | |
2076 | * Also sets r12 to the interrupt vector for any interrupt that needs | |
2077 | * to be handled now by the host (0x500 for external interrupt), or zero. | |
2078 | */ | |
2079 | kvmppc_check_wake_reason: | |
2080 | mfspr r6, SPRN_SRR1 | |
aa31e843 PM |
2081 | BEGIN_FTR_SECTION |
2082 | rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ | |
2083 | FTR_SECTION_ELSE | |
2084 | rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ | |
2085 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) | |
2086 | cmpwi r6, 8 /* was it an external interrupt? */ | |
e3bbbbfa PM |
2087 | li r12, BOOK3S_INTERRUPT_EXTERNAL |
2088 | beq kvmppc_read_intr /* if so, see what it was */ | |
2089 | li r3, 0 | |
2090 | li r12, 0 | |
2091 | cmpwi r6, 6 /* was it the decrementer? */ | |
2092 | beq 0f | |
aa31e843 PM |
2093 | BEGIN_FTR_SECTION |
2094 | cmpwi r6, 5 /* privileged doorbell? */ | |
2095 | beq 0f | |
5d00f66b PM |
2096 | cmpwi r6, 3 /* hypervisor doorbell? */ |
2097 | beq 3f | |
aa31e843 | 2098 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
e3bbbbfa PM |
2099 | li r3, 1 /* anything else, return 1 */ |
2100 | 0: blr | |
2101 | ||
5d00f66b PM |
2102 | /* hypervisor doorbell */ |
2103 | 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL | |
2104 | li r3, 1 | |
2105 | blr | |
2106 | ||
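/*
 * As a C sketch, the decode above amounts to (the field extraction
 * matches the P8 rlwinm; on P7 the mask is 0xe because the wake
 * reason field is only 3 bits):
 *
 *	unsigned int reason = (srr1 >> 18) & 0xf;
 *
 *	switch (reason) {
 *	case 8:				external interrupt
 *		return kvmppc_read_intr();
 *	case 6:				decrementer
 *		return 0;
 *	case 5:				privileged doorbell (P8 only)
 *		return 0;
 *	case 3:				hypervisor doorbell (P8 only)
 *		trap = BOOK3S_INTERRUPT_H_DOORBELL;
 *		return 1;
 *	default:
 *		return 1;		let the host sort it out
 *	}
 */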
c934243c PM |
2107 | /* |
2108 | * Determine what sort of external interrupt is pending (if any). | |
2109 | * Returns: | |
2110 | * 0 if no interrupt is pending | |
2111 | * 1 if an interrupt is pending that needs to be handled by the host | |
2112 | * -1 if there was a guest wakeup IPI (which has now been cleared) | |
2113 | */ | |
2114 | kvmppc_read_intr: | |
2115 | /* see if a host IPI is pending */ | |
2116 | li r3, 1 | |
2117 | lbz r0, HSTATE_HOST_IPI(r13) | |
2118 | cmpwi r0, 0 | |
2119 | bne 1f | |
371fefd6 | 2120 | |
c934243c PM |
2121 | /* Now read the interrupt from the ICP */ |
2122 | ld r6, HSTATE_XICS_PHYS(r13) | |
19ccb76a | 2123 | li r7, XICS_XIRR |
c934243c PM |
2124 | cmpdi r6, 0 |
2125 | beq- 1f | |
2126 | lwzcix r0, r6, r7 | |
76d072fb AG |
2127 | /* |
2128 | * Save XIRR for later. Since we get it in reverse endian on LE | |
2129 | * systems, save it byte reversed and fetch it back in host endian. | |
2130 | */ | |
2131 | li r3, HSTATE_SAVED_XIRR | |
2132 | STWX_BE r0, r3, r13 | |
2133 | #ifdef __LITTLE_ENDIAN__ | |
2134 | lwz r3, HSTATE_SAVED_XIRR(r13) | |
2135 | #else | |
2136 | mr r3, r0 | |
2137 | #endif | |
2138 | rlwinm. r3, r3, 0, 0xffffff | |
19ccb76a | 2139 | sync |
c934243c | 2140 | beq 1f /* if nothing pending in the ICP */ |
371fefd6 | 2141 | |
c934243c PM |
2142 | /* We found something in the ICP... |
2143 | * | |
2144 | * If it's not an IPI, stash it in the PACA and return to | |
2145 | * the host; we don't (yet) handle delivering real external | |
2146 | * interrupts directly to the guest | |
2147 | */ | |
2148 | cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ | |
c934243c | 2149 | bne 42f |
371fefd6 | 2150 | |
c934243c PM |
2151 | /* It's an IPI, clear the MFRR and EOI it */ |
2152 | li r3, 0xff | |
2153 | li r8, XICS_MFRR | |
2154 | stbcix r3, r6, r8 /* clear the IPI */ | |
2155 | stwcix r0, r6, r7 /* EOI it */ | |
2156 | sync | |
f0888f70 | 2157 | |
c934243c PM |
2158 | /* We need to re-check host IPI now in case it got set in the |
2159 | * meantime. If it's clear, we bounce the interrupt to the | |
2160 | * guest | |
2161 | */ | |
2162 | lbz r0, HSTATE_HOST_IPI(r13) | |
2163 | cmpwi r0, 0 | |
2164 | bne- 43f | |
2165 | ||
2166 | /* OK, it's an IPI for us */ | |
2167 | li r3, -1 | |
2168 | 1: blr | |
2169 | ||
76d072fb AG |
2170 | 42: /* It's not an IPI and it's for the host. We saved a copy of XIRR in |
2171 | * the PACA earlier; it will be picked up by the host ICP driver | |
c934243c | 2172 | */ |
e3bbbbfa | 2173 | li r3, 1 |
c934243c PM |
2174 | b 1b |
2175 | ||
2176 | 43: /* We raced with the host; we need to resend that IPI, bummer */ | |
2177 | li r0, IPI_PRIORITY | |
2178 | stbcix r0, r6, r8 /* set the IPI */ | |
2179 | sync | |
e3bbbbfa | 2180 | li r3, 1 |
c934243c | 2181 | b 1b |
371fefd6 | 2182 | |
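/*
 * kvmppc_read_intr above, as a C sketch (xics_read32/xics_write8/
 * xics_write32 are illustrative stand-ins for the cache-inhibited
 * lwzcix/stbcix/stwcix accesses):
 *
 *	if (local_paca->kvm_hstate.host_ipi)
 *		return 1;
 *	xirr = xics_read32(XICS_XIRR);		also saved for the host ICP
 *	if (!(xirr & 0xffffff))
 *		return 0;			nothing pending
 *	if ((xirr & 0xffffff) != XICS_IPI)
 *		return 1;			host interrupt; host EOIs it
 *	xics_write8(XICS_MFRR, 0xff);		clear the IPI
 *	xics_write32(XICS_XIRR, xirr);		EOI it
 *	if (local_paca->kvm_hstate.host_ipi) {
 *		xics_write8(XICS_MFRR, IPI_PRIORITY);	raced: resend it
 *		return 1;
 *	}
 *	return -1;				guest wakeup IPI, now cleared
 */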
de56a948 PM |
2183 | /* |
2184 | * Save away FP, VMX and VSX registers. | |
2185 | * r3 = vcpu pointer | |
595e4f7e PM |
2186 | * N.B. r30 and r31 are volatile across this function, |
2187 | * thus it is not callable from C. | |
a8606e20 | 2188 | */ |
595e4f7e PM |
2189 | kvmppc_save_fp: |
2190 | mflr r30 | |
2191 | mr r31,r3 | |
8943633c PM |
2192 | mfmsr r5 |
2193 | ori r8,r5,MSR_FP | |
de56a948 PM |
2194 | #ifdef CONFIG_ALTIVEC |
2195 | BEGIN_FTR_SECTION | |
2196 | oris r8,r8,MSR_VEC@h | |
2197 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
2198 | #endif | |
2199 | #ifdef CONFIG_VSX | |
2200 | BEGIN_FTR_SECTION | |
2201 | oris r8,r8,MSR_VSX@h | |
2202 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
2203 | #endif | |
2204 | mtmsrd r8 | |
595e4f7e | 2205 | addi r3,r3,VCPU_FPRS |
9bf163f8 | 2206 | bl store_fp_state |
de56a948 PM |
2207 | #ifdef CONFIG_ALTIVEC |
2208 | BEGIN_FTR_SECTION | |
595e4f7e | 2209 | addi r3,r31,VCPU_VRS |
9bf163f8 | 2210 | bl store_vr_state |
de56a948 PM |
2211 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2212 | #endif | |
2213 | mfspr r6,SPRN_VRSAVE | |
e724f080 | 2214 | stw r6,VCPU_VRSAVE(r31) |
595e4f7e | 2215 | mtlr r30 |
de56a948 PM |
2216 | blr |
2217 | ||
2218 | /* | |
2219 | * Load up FP, VMX and VSX registers | |
2220 | * r4 = vcpu pointer | |
595e4f7e PM |
2221 | * N.B. r30 and r31 are volatile across this function, |
2222 | * thus it is not callable from C. | |
de56a948 | 2223 | */ |
de56a948 | 2224 | kvmppc_load_fp: |
595e4f7e PM |
2225 | mflr r30 |
2226 | mr r31,r4 | |
de56a948 PM |
2227 | mfmsr r9 |
2228 | ori r8,r9,MSR_FP | |
2229 | #ifdef CONFIG_ALTIVEC | |
2230 | BEGIN_FTR_SECTION | |
2231 | oris r8,r8,MSR_VEC@h | |
2232 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |
2233 | #endif | |
2234 | #ifdef CONFIG_VSX | |
2235 | BEGIN_FTR_SECTION | |
2236 | oris r8,r8,MSR_VSX@h | |
2237 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |
2238 | #endif | |
2239 | mtmsrd r8 | |
595e4f7e | 2240 | addi r3,r4,VCPU_FPRS |
9bf163f8 | 2241 | bl load_fp_state |
de56a948 PM |
2242 | #ifdef CONFIG_ALTIVEC |
2243 | BEGIN_FTR_SECTION | |
595e4f7e | 2244 | addi r3,r31,VCPU_VRS |
9bf163f8 | 2245 | bl load_vr_state |
de56a948 PM |
2246 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2247 | #endif | |
e724f080 | 2248 | lwz r7,VCPU_VRSAVE(r31) |
de56a948 | 2249 | mtspr SPRN_VRSAVE,r7 |
595e4f7e PM |
2250 | mtlr r30 |
2251 | mr r4,r31 | |
de56a948 | 2252 | blr |
44a3add8 PM |
2253 | |
2254 | /* | |
2255 | * We come here if we get any exception or interrupt while we are | |
2256 | * executing host real mode code while in guest MMU context. | |
2257 | * For now just spin, but we should do something better. | |
2258 | */ | |
2259 | kvmppc_bad_host_intr: | |
2260 | b . | |
e4e38121 MN |
2261 | |
2262 | /* | |
2263 | * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken | |
2264 | * from VCPU_INTR_MSR and is modified based on the required TM state changes. | |
2265 | * r11 has the guest MSR value (in/out) | |
2266 | * r9 has a vcpu pointer (in) | |
2267 | * r0 is used as a scratch register | |
2268 | */ | |
2269 | kvmppc_msr_interrupt: | |
2270 | rldicl r0, r11, 64 - MSR_TS_S_LG, 62 | |
2271 | cmpwi r0, 2 /* Check if we are in transactional state.. */ | |
2272 | ld r11, VCPU_INTR_MSR(r9) | |
2273 | bne 1f | |
2274 | /* ... if transactional, change to suspended */ | |
2275 | li r0, 1 | |
2276 | 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG | |
2277 | blr | |
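/*
 * Equivalent C for the TS field juggling above (a sketch):
 *
 *	unsigned long ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *	unsigned long new_msr = vcpu->arch.intr_msr;
 *
 *	if (ts == 2)	transactional (T=1 S=0) becomes suspended (T=0 S=1)
 *		ts = 1;
 *	new_msr = (new_msr & ~MSR_TS_MASK) | (ts << MSR_TS_S_LG);
 *
 * Any other TS value is carried over into the new MSR unchanged.
 */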
9bc01a9b PM |
2278 | |
2279 | /* | |
2280 | * This works around a hardware bug on POWER8E processors, where | |
2281 | * writing a 1 to the MMCR0[PMAO] bit doesn't generate a | |
2282 | * performance monitor interrupt. Instead, when we need to have | |
2283 | * an interrupt pending, we have to arrange for a counter to overflow. | |
2284 | */ | |
2285 | kvmppc_fix_pmao: | |
2286 | li r3, 0 | |
2287 | mtspr SPRN_MMCR2, r3 | |
2288 | lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h | |
2289 | ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN | |
2290 | mtspr SPRN_MMCR0, r3 | |
2291 | lis r3, 0x7fff | |
2292 | ori r3, r3, 0xffff | |
2293 | mtspr SPRN_PMC6, r3 | |
2294 | isync | |
2295 | blr |
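/*
 * In C terms the workaround amounts to (a sketch):
 *
 *	mtspr(SPRN_MMCR2, 0);
 *	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE |
 *			  MMCR0_PMCjCE | MMCR0_C56RUN);
 *	mtspr(SPRN_PMC6, 0x7fffffff);	one count below overflow
 *
 * The next time PMC6 ticks over it overflows and raises the
 * performance monitor interrupt that the PMAO write failed to
 * generate.
 */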