/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
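/*
 * Worked example (illustration only): VCPU_GPRS_TM(5) expands to
 * ((5) * ULONG_SIZE) + VCPU_GPR_TM, i.e. the byte offset of the
 * checkpointed image of r5 within the vcpu structure.
 */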
#ifdef __LITTLE_ENDIAN__
#error Need to fix lppaca and SLB shadow accesses in little endian mode
#endif

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	beq	23f			/* skip if not */
	ld	r3, HSTATE_MMCR(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	ld	r6, HSTATE_MMCR + 24(r13)
	ld	r7, HSTATE_MMCR + 32(r13)
	ld	r8, HSTATE_MMCR + 40(r13)
	ld	r9, HSTATE_MMCR + 48(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
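	/*
	 * A note on the load below (an assumption, not stated in this
	 * excerpt): the 112 matches the stack frame pushed by
	 * kvmppc_hv_entry_trampoline, so 112+PPC_LR_STKOFF reaches the
	 * host return address saved before that frame was created.
	 */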
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	b	hmi_exception_after_realmode
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_count */
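#if 0	/* Illustrative sketch (an assumption, not part of this excerpt):
	   the ordering described above would need a full barrier here, so
	   the napping_threads update is visible before the test below. */
	sync
#endif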
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)
	stb	r3, HSTATE_HWTHREAD_REQ(r13)

	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	/* if we have no vcpu to run, go back to sleep */

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)
	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
	 * the nap_count, because once the increment to nap_count is
	 * visible we could be given another vcpu.
	 */
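#if 0	/* Illustrative sketch (assumed, not from this excerpt) of the
	   elided ordering: a barrier between the vcpu clear and the
	   nap_count increment, with r4 pointing at vcore->nap_count as
	   set up just below. */
	lwsync				/* vcpu clear is visible first */
51:	lwarx	r3, 0, r4		/* then bump nap_count atomically */
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b
#endif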
	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT

	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	/* Clear the runlatch bit before napping */

	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRS = free
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
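#if 0	/* Illustrative sketch (assumed): the increment/retry tail of the
	   lwarx loop the two lines above belong to. */
	addi	r3,r3,1			/* increment the entry count */
	stwcx.	r3,0,r9
	bne	21b			/* lost reservation, try again */
#endif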
	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r6,HSTATE_PTID(r13)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
23:	ldarx	r7,0,r6			/* if set, clear the bit */
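#if 0	/* Illustrative sketch (assumed): the clear-and-retry tail of the
	   ldarx above. */
	andc	r7,r7,r0		/* clear our bit */
	stdcx.	r7,0,r6
	bne	23b			/* reservation lost, retry */
#endif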
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
	li	r6,512			/* POWER8 has 512 sets */
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	li	r7,0x800		/* IS field = 0b10 */

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)

	/* Set LPCR and RMOR. */
10:	ld	r8,VCORE_LPCR(r5)

	/* Check if HDEC expires soon */
	cmpwi	r3,512			/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	ld	r3,toc_tlbie_lock@toc(2)
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r7,VCORE_LPCR(r5)	/* use vcore->lpcr to store HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */

	/* drop the guest's tlbie_lock */

	/* Check if HDEC expires soon */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Enable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

	/* Increment yield count if they have a VPA */
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	stw	r5, LPPACA_YIELDCOUNT(r3)
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
BEGIN_FTR_SECTION_NESTED(89)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm			/* TM not active in guest */

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this was not
	 * set on a kvmppc_set_one_reg() call, but we shouldn't let that
	 * crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */
	addi	r3, r31, VCPU_FPRS_TM
	addi	r3, r31, VCPU_VRS_TM
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13)	/* Save TOC */

	/* Clear the MSR RI bit, since r1 and r13 are about to be clobbered. */

	/* Load GPRs r0-r28 */
	ld	reg, VCPU_GPRS_TM(reg)(r31)
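#if 0	/* Illustrative sketch (assumed): in the full source, the load
	   above sits inside an assembler loop over r0-r28, roughly: */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr
#endif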
	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */

	/* Now let's get back the state we need. */
	ld	r29, HSTATE_DSCR(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
#endif
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Skip next section on POWER7 or PPC970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r5, VCPU_TCSCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore state of CTRL run bit; assume 1 on entry */

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
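	/*
	 * A note on the two instructions above: the first rotate puts the
	 * MSR_HV bit in the most-significant position, where the rldicl
	 * mask (MB=1) clears it; the second rotates everything back (the
	 * two rotations sum to 64), leaving r11 unchanged except that
	 * MSR_HV is now 0.
	 */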
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	andi.	r8, r11, MSR_EE
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	li	r0, BOOK3S_INTERRUPT_DECREMENTER

12:	mtspr	SPRN_SRR0, r10
	bl	kvmppc_msr_interrupt

	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)
	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)
	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode
	/* Only handle external interrupts here on arch 206 and later */
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt; first check for host_ipi.  If this is
	 * set, we know the host wants us out, so do that now. */
	bgt	ext_interrupt_to_host

	/* Check if any CPU is heading out to the host; if so, head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	bge	ext_interrupt_to_host

	/* Return to guest after delivering any pending interrupt */
	b	deliver_guest_interrupt

ext_interrupt_to_host:
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)

	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	stw	r5,VCPU_SLB_MAX(r9)
	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/* Save and reset AMR and UAMOR before turning on the MMU */
	std	r6,VCPU_UAMOR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f			/* TM not active in guest. */
	li	r3, TM_CAUSE_KVM_RESCHED
	/* Clear the MSR RI bit, since r1 and r13 are about to be clobbered. */

	/* All GPRs are volatile at this point. */
	/* Temporarily store r13 and r9 so we have some regs to play with */
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	.if	(reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif

	/* ... now save r13 */
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)
	addi	r3, r9, VCPU_FPRS_TM
	addi	r3, r9, VCPU_VRS_TM
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)

	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
#endif
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	stw	r3, LPPACA_YIELDCOUNT(r8)
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* On P7, clear MMCRA in order to disable SDAR updates */
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
hdec_soon:			/* r12 = trap, r13 = paca */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
	isync			/* order stwcx. vs. reading napping_threads */
	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
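#if 0	/* Illustrative sketch (assumed, the actual write is elided from
	   this excerpt): the "set HDEC to 0" step described above. */
	li	r0,0
	mtspr	SPRN_HDEC,r0		/* HDEC fires on every thread */
#endif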
	cmpwi	r3,0x100		/* Are we the first here? */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER

	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lbz	r4,HSTATE_PTID(r13)
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */

	/* Order entry/exit update vs. IPIs */
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	stbcix	r0,r7,r8		/* trigger the IPI */
	addi	r6,r6,PACA_SIZE
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

17:	ld	r0, VCORE_PCR(r5)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */

16:	ld	r8,KVM_HOST_LPCR(r4)
	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
#ifdef __BIG_ENDIAN__
	lwz	r8,PACA_LOCK_TOKEN(r13)
#else
	lwz	r8,PACAPACAINDEX(r13)
#endif
	addi	r3,r4,KVM_TLBIE_LOCK
	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	stw	r0,0(r3)		/* drop native_tlbie_lock */
	lis	r8,0x7fff		/* MAX_INT@h */

	/* Disable HDEC interrupts */
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	.endr

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI for the guest */
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	LOAD_REG_ADDR(r4, hcall_real_table)
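#if 0	/* Illustrative sketch (assumed): the elided dispatch through the
	   handler table -- fetch the 32-bit handler offset, form an
	   address, and call through CTR. */
	lwax	r3,r3,r4		/* handler offset, or 0 if unhandled */
	cmpwi	r3,0
	beq	guest_exit_cont		/* no real-mode handler */
	add	r3,r3,r4
	mtctr	r3
#endif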
	mr	r3,r9			/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path. */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWRX, r5
_GLOBAL(kvmppc_h_cede)
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS
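#if 0	/* Illustrative sketch (assumed): the elided atomic set of our
	   bit in vcore->napping_threads, with r0 holding our thread bit
	   as computed in elided code. */
31:	lwarx	r4,0,r6
	or	r4,r4,r0		/* set our napping bit */
	stwcx.	r4,0,r6
	bne	31b			/* reservation lost, retry */
#endif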
	/* order napping_threads update vs testing entry_exit_count */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f			/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1, PECE0 and PECEDP set in LPCR.  Also clear the
	 * runlatch bit before napping.
	 */
	mfspr	r2, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r2

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	oris	r5,r5,LPCR_PECEDP@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r0, HSTATE_SCRATCH0(r13)
	ld	r0, HSTATE_SCRATCH0(r13)
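#if 0	/* Illustrative sketch (assumed): the elided tail of the nap
	   sequence -- wait for the store above to complete, then nap. */
	ptesync
1:	cmpd	r0, r0			/* spin until the load completes */
	bne	1b
	nap
	b	.
#endif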
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	b	hcall_real_fallback
	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	cmpdi	r3, 0		/* Did we handle the MCE? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
	 * (non-fatal) errors, go back to guest execution with the current
	 * HSRR0 instead of exiting the guest.  This new approach injects
	 * the machine check into the guest for fatal errors, causing the
	 * guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups inside the
	 * guest and made it difficult to recover the guest instance.
	 */
	ld	r11, VCPU_MSR(r9)
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns:
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 */
kvmppc_check_wake_reason:
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
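	/*
	 * Both variants above rotate SRR1 so the wake-reason field lands
	 * in the low-order bits, then mask it; the values tested below
	 * (8 = external interrupt, 6 = decrementer, and so on) are the
	 * field contents after that shift.
	 */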
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	rlwinm.	r3, r0, 0, 0xffffff
	beq	1f			/* if nothing pending in the ICP */
	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */

	/* It's an IPI, clear the MFRR and EOI it */
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */

	/* We need to re-check host IPI now in case it got set in the
	 * meantime.  If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)

	/* OK, it's an IPI for us */

42:	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit; it will be picked up by the host ICP driver.
	 */
	stw	r0, HSTATE_SAVED_XIRR(r13)

43:	/* We raced with the host; we need to resend that IPI, bummer. */
	stbcix	r0, r6, r8		/* set the IPI */
/*
 * Save away FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r3,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
/*
 * Load up FP, VMX and VSX registers.
 *
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
	addi	r3,r4,VCPU_FPRS
#ifdef CONFIG_ALTIVEC
	addi	r3,r31,VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is
 * taken from VCPU_INTR_MSR and is modified based on the required TM state
 * changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
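 *
 * Worked example (illustrative, not from the source): if r11 arrives with
 * MSR_TS = 0b10 (transactional), the rldicl below leaves 2 in r0 and the
 * cmpwi matches, so the rldimi rewrites the TS field of the freshly
 * loaded VCPU_INTR_MSR value to 0b01 (suspended).
 */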
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3