arch/powerpc/kvm/book3s_hv_rmhandlers.S
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200). The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */
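
	/*
	 * (Note: the rlwinm below isolates bits 19:21 of SRR1, the
	 * wake-reason field on POWER7; a value of 4 there indicates
	 * wakeup by an external interrupt, which is what the compare
	 * tests for.)
	 */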
	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f			/* if not */
	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r7,XICS_XIRR		/* if it was an external interrupt, */
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	28f			/* none there? */
	cmpwi	r9,XICS_IPI		/* was it an IPI? */
	bne	29f
	li	r0,0xff
	li	r6,XICS_MFRR
	stbcix	r0,r5,r6		/* clear IPI */
	stwcix	r8,r5,r7		/* EOI the interrupt */
	sync				/* order loading of vcpu after that */

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest
	b	30f

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */
	b	kvm_no_guest
28:	/* SRR1 said external but ICP said nope?? */
	b	kvm_no_guest
29:	/* External non-IPI interrupt to offline secondary thread? help?? */
	stw	r8,HSTATE_SAVED_XIRR(r13)
	b	kvm_no_guest

30:	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
	lwsync
	/* Clear any pending IPI - we're an offline thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
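	/*
	 * (The store/ptesync/load and the always-true compare below are,
	 * as we read them, an idiom to make sure the state update is
	 * visible before the thread naps: cmpd r0,r0 always sets EQ, so
	 * the bne never loops, but it makes the nap depend on the load.)
	 */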
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b
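	/*
	 * (vcore->entry_exit_count packs the entry count in bits 0-7 and
	 * the exit count in bits 8-15, so a value >= 0x100 above means
	 * at least one thread has started to exit the guest.)
	 */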

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
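	/*
	 * (Each tlbiel below, with IS = 0b10 in r7, invalidates one TLB
	 * congruence class; stepping the address by 0x1000 for 128
	 * iterations is assumed to cover every set on this CPU.)
	 */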
	li	r6,128			/* and flush the TLB */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6			/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
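	/*
	 * (TBU40 writes set only the upper 40 bits of the timebase, so
	 * if the low 24 bits wrapped between the two mftb reads above,
	 * the upper bits are bumped by one to compensate.)
	 */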

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
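	/*
	 * (The six back-to-back HID0 reads above appear to follow the
	 * PPC970 requirement that an mtspr to HID0 be serialized by a
	 * series of mfspr reads; see the 970 user manual.)
	 */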

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	lis	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
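	/*
	 * ((MSR_ME << 1) | 1 rotated right by one bit moves the low 1
	 * into the MSR_SF position (bit 63) and puts MSR_ME back in
	 * place, building MSR_SF | MSR_ME without a 64-bit immediate.)
	 */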
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
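	/*
	 * (The b . above should never execute: hrfid enters the guest.
	 * It just catches an unexpected fall-through.)
	 */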

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
do_ext_interrupt:
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	ext_interrupt_to_host

	/* All right, looks like an IPI for the guest, we need to set MER */
	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	ext_interrupt_to_host

	/* See if there is a pending interrupt for the guest */
	mfspr	r8, SPRN_LPCR
	ld	r0, VCPU_PENDING_EXC(r9)
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldicl.	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	beq	2f

	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi.	r0, r11, MSR_EE
	beq	2f
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_EXTERNAL
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
2:	mr	r4, r9
	mtspr	SPRN_LPCR, r8
	b	fast_guest_return

ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt. To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0	/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE	/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8	/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current host timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr
secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr
	b	22b

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace. We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

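	/*
	 * Each entry below is the offset of a real-mode handler from
	 * hcall_real_table, or 0 if the hcall is not handled in real
	 * mode. Since hcall numbers are multiples of 4 and each entry
	 * is 4 bytes, the hcall number itself indexes the table (which
	 * is what the lwax in hcall_try_real_mode relies on).
	 */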
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	.kvmppc_rm_h_eoi - hcall_real_table
	.long	.kvmppc_rm_h_cppr - hcall_real_table
	.long	.kvmppc_rm_h_ipi - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	.kvmppc_rm_h_xirr - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
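	/*
	 * (popcntw counts the napping threads including ourselves, and
	 * r8 holds the entry count, i.e. the number of threads in the
	 * guest; if every in-guest thread would then be napping, the
	 * cede is sent up to the host instead, per the comment above.)
	 */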
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_count */
	isync
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3, SPRN_SRR1
	rlwinm	r3, r3, 44-31, 0x7	/* extract wake reason field */
	cmpwi	r3, 4			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	mr	r9, r4
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	beq	do_ext_interrupt	/* if so */

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check. SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	rlwinm.	r3, r0, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	li	r3, 1
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
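	/*
	 * The .rept blocks below are assembler-time loops: 'reg' is an
	 * absolute symbol incremented each iteration, so each block
	 * expands into 32 store (or, in kvmppc_load_fp, load)
	 * instructions, one per FP/VSX/VMX register.
	 */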
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.