/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
#include <asm/ultravisor-api.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
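/*
 * Typical use, as a sketch (pre-POWER9 HDEC is only 32 bits wide, so
 * it must be sign-extended before any signed 64-bit comparison):
 *	mfspr	r3, SPRN_HDEC
 *	EXTEND_HDEC(r3)
 *	cmpdi	r3, 0
 */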
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_SHORT_PATH	(SFS-8)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */
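/*
 * These are byte offsets from r1 within the kvmppc_hv_entry stack
 * frame (SFS is the frame size, defined elsewhere in this file).
 * Usage sketch:
 *	std	r5, STACK_SLOT_TID(r1)		# save host value on entry
 *	...
 *	ld	r5, STACK_SLOT_TID(r1)		# restore it on exit
 */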
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */

	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r4, KVM_SPLIT_DO_SET(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	andi.	r0, r0, MSR_IR		/* in real mode? */

	/* RFI into the highmem handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */

	/* Virtual-mode return */

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */

	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)

	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
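	/*
	 * A sketch of the napping handshake in C-like form (not the
	 * literal code; the atomic bit-set itself is elided above):
	 *	atomic_or(1 << ptid, &vc->napping_threads);
	 *	lwsync();			# order update vs load below
	 *	if (vc->entry_exit_map >= 0x100)
	 *		goto kvm_novcpu_exit;	# another thread is exiting
	 */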
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */

/*
 * Entered from kvm_start_guest if kvm_hstate.napping is set
 */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */

	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif

	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit

	b	kvmhv_switch_to_host
/*
 * We come in here when woken from the Linux offline idle code.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	ld	r4,PACAEMERGSP(r13)
	subi	r1,r4,STACK_FRAME_OVERHEAD

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	stb	r0, HSTATE_NAPPING(r13)

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	LOAD_REG_ADDR(r6, decrementer_max)

	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)

	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync makes sure that
	 * is the case.
	 */
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting guest will fall through this path.
	 * Before proceeding, just check for HMI interrupt and
	 * invoke opal hmi handler. By now we are sure that the
	 * primary thread on this core/subcore has already made partition
	 * switch/TB resync and we are good to call opal hmi handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	/* set up r3 for return */
	addi	r1, r1, STACK_FRAME_OVERHEAD

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* Set LPCR, LPIDR etc. on P9 */
	bl	kvmhv_p9_restore_lpcr

	/*
	 * Here the primary thread is trying to return the core to
	 * whole-core mode, so we need to nap.
	 */
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest. Hence the HMI keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them. This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before we go
	 * to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode

	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)

	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED

	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)

	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:

	/*
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRs = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	lbz	r6, HSTATE_PTID(r13)
	addi	r8, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
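	/*
	 * Sketch of the convention assumed here: VCORE_ENTRY_EXIT is a
	 * single 32-bit word with the entry map in its low byte and the
	 * exit map in the byte above it, so:
	 *	if (vc->entry_exit_map >= 0x100)	# exit map non-zero
	 *		goto secondary_too_late;
	 */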
	/* Primary thread switches to guest partition. */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush

	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current host timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
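	/*
	 * The idea, as a sketch: TBU40 writes only the upper 40 bits of
	 * the 64-bit timebase, so after writing (tb + offset) we read
	 * the TB again; if the low 24 bits wrapped past zero during the
	 * write, add 2^24 (the addis of 0x100 above, i.e. 1 in the
	 * lowest of the upper 40 bits) and write TBU40 once more.
	 */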
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)

	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Save host values of some registers */
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r5, STACK_SLOT_AMR(r1)
	std	r6, STACK_SLOT_UAMOR(r1)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)

	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)

	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r7, VCPU_CSIGR(r4)

	/* POWER9-only registers */
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)

	/* Check if HDEC expires soon */
	cmpdi	r3, 512			/* 1 microsecond */

	/* For hash guest, clear out and reload the SLB */
	lbz	r0, KVM_RADIX(r6)

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	lwz	r8, VCPU_XIVE_CAM_WORD(r4)
	li	r7, TM_QW1_OS + TM_WORD2
	andi.	r0, r0, MSR_DR		/* in real mode? */
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	stb	r9, VCPU_XIVE_PUSHED(r4)
	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	stb	r0, VCPU_IRQ_PENDING(r4)
	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	beq	4f			/* in real mode? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r4)
4:	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P

	/* Now P is 0, we can clear the flag */
	stb	r0, VCPU_XIVE_ESC_ON(r4)
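	/*
	 * In C-like terms, a sketch of the logic above:
	 *	old = load(esc_esb_addr + XIVE_ESB_SET_PQ_01);
	 *	if (!(old & XIVE_ESB_VAL_P))
	 *		vcpu->arch.xive_esc_on = 0;
	 * i.e. only clear the flag once we know the escalation
	 * interrupt cannot already be in flight to the host queue.
	 */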
#endif /* CONFIG_KVM_XICS */

	stw	r0, STACK_SLOT_SHORT_PATH(r1)

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz	r3, VCPU_DBELL_REQ(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)

	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
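	/*
	 * Equivalent C for the three instructions above (a sketch):
	 *	msr = (vcpu->arch.msr & ~MSR_HV) | MSR_ME;
	 * The rldicl/rotldi pair rotates MSR_HV down to the top bit,
	 * clears it, and rotates back, avoiding a 64-bit mask constant.
	 */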
	/*
	 * R10: value for HSRR0
	 * R11: value for HSRR1
	 */
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	ld	r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Move canary into DSISR to check for later */
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	lbz	r7, KVM_SECURE_GUEST(r6)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
/*
 * Use the UV_RETURN ultracall to return control back to the Ultravisor
 * after processing a hypercall or interrupt that was forwarded (a.k.a.
 * reflected) to the Hypervisor.
 *
 * All registers have already been loaded, except:
 *   R2 = SRR1, so UV can detect a synthesized interrupt (if any)
 */
	ld	r0, VCPU_GPR(R3)(r4)
	ori	r3, r3, UV_RETURN
	ld	r4, VCPU_GPR(R4)(r4)

/*
 * Enter the guest on a P9 or later system where we have exactly
 * one vcpu per vcore and we don't need to go to real mode
 * (which implies that host and guest are both using radix MMU mode).
 *
 * Most SPRs and all the VSRs have been loaded already.
 */
_GLOBAL(__kvmhv_vcpu_entry_p9)
EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
	std	r0, PPC_LR_STKOFF(r1)
	stw	r0, STACK_SLOT_SHORT_PATH(r1)
	std	r3, HSTATE_KVM_VCPU(r13)
	std	r1, HSTATE_HOST_R1(r13)

	std	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	ld	reg, __VCPU_GPR(reg)(r3)

	std	r10, HSTATE_HOST_MSR(r13)
	b	fast_guest_entry_c

guest_exit_short_path:
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	std	reg, __VCPU_GPR(reg)(r9)
	ld	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)

	mr	r3, r12			/* trap number */
	ld	r0, PPC_LR_STKOFF(r1)

	/* If we are in real mode, do a rfid to get back to the caller */
	andi.	r5, r4, MSR_IR
	rldicl	r5, r4, 64 - MSR_TS_S_LG, 62	/* extract TS field */
	ld	r10, HSTATE_HOST_MSR(r13)
	rldimi	r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtspr	SPRN_SRR1, r10
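	/*
	 * Sketch of the intent: SRR1 is built as the host MSR with the
	 * guest's MSR_TS (transaction state) field inserted, so the
	 * rfid does not discard a suspended transaction:
	 *	srr1 = (host_msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
	 */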
	stw	r12, STACK_SLOT_TRAP(r1)
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/
/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12 = (guest CR << 32) | interrupt vector
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */

	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif
	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
#endif
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
#endif

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/* always exit if we're running a nested guest */
	ld	r0, VCPU_NESTED(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	beq	maybe_reenter_guest

	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	li	r7, TM_SPC_PULL_OS_CTX
	andi.	r0, r0, MSR_DR		/* in real mode? */
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
#endif /* CONFIG_KVM_XICS */
	/* If we came in through the P9 short path, go back out to C now */
	lwz	r0, STACK_SLOT_SHORT_PATH(r1)
	bne	guest_exit_short_path

	/* For hash guest, read the guest SLB and save it away */
	lbz	r0, KVM_RADIX(r5)
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE

	/* Finally clear out the SLB */
3:	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h

	stw	r12, STACK_SLOT_TRAP(r1)

	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)

	/* On P9, if the guest has large decr enabled, don't sign extend */
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)

	mfspr	r5, SPRN_TCSCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)

	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld	r8, STACK_SLOT_IAMR(r1)

8:	/* Power7 jumps back in here */
	std	r6,VCPU_UAMOR(r9)
	ld	r5,STACK_SLOT_AMR(r1)
	ld	r6,STACK_SLOT_UAMOR(r1)
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	ld	r9, HSTATE_KVM_VCPU(r13)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r4, LPPACA_PMCINUSE(r8)
21:	bl	kvmhv_save_guest_pmu
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Restore host values of some registers */
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	lbz	r0, KVM_RADIX(r5)

	/*
	 * Radix: do eieio; tlbsync; ptesync sequence in case we
	 * interrupted the guest between a tlbie and a ptesync.
	 */

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */

	/* Then do a congruence class local flush */
	lwz	r0,KVM_TLB_SETS(r6)
	li	r7,0x400		/* IS field = 0b01 */
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
#endif /* CONFIG_PPC_RADIX_MMU */
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

	/*
	 * If this is an HMI, we called kvmppc_realmode_hmi_handler
	 * above, which may or may not have already called
	 * kvmppc_subcore_exit_guest. Fortunately, all that
	 * kvmppc_subcore_exit_guest does is clear a flag, so calling
	 * it again here is benign even if kvmppc_realmode_hmi_handler
	 * has already called it.
	 */
	bl	kvmppc_subcore_exit_guest

30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	ld	r0, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	bl	kvmhv_p9_restore_lpcr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time
#endif

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)
kvmppc_guest_external:
	/*
	 * External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now.
	 */

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to guest, we check if any CPU is heading out
	 * to the host and if so, we head out also. If no CPUs are heading
	 * out, we fall through to the return-to-guest checks below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	 */
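	/*
	 * So the dispatch below behaves roughly like (a sketch):
	 *	if (rc == 2) { r12 = BOOK3S_INTERRUPT_HV_RM_HARD; exit; }
	 *	if (rc == 1) exit;		# guest_exit_cont
	 *	else maybe_reenter_guest;	# rc <= 0
	 */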
	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)

1:	/* Return code <= 1 */

	/* Return code <= 0 */
maybe_reenter_guest:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2. This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
	/* Save instruction image in HEIR */
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */

	/* Call C code to do the emulation */
	bl	kvmhv_p9_tm_emulation_early
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
	lbz	r0, KVM_RADIX(r3)
	mfspr	r6, SPRN_HDSISR

	/* Look for DSISR canary. If we find it, retry instruction */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	std	r5, VCPU_FAULT_GPA(r9)
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
	lbz	r0, KVM_RADIX(r3)
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI (or ISegI) for the guest */
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	/* sc 1 from nested guest - give it to L1 to handle */
	ld	r0, VCPU_NESTED(r9)
	cmpldi	r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
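	/*
	 * In C, a sketch of the check: hcall numbers are multiples of
	 * 4, so bit (req / 4) of kvm->arch.enabled_hcalls[] gates
	 * in-kernel handling:
	 *	idx = (req / 4) >> 6;		# srdi + sldi above
	 *	bit = (req / 4) & 0x3f;		# rlwinm above
	 *	if (!(kvm->arch.enabled_hcalls[idx] & (1ul << bit)))
	 *		goto guest_exit_cont;	# punt to userspace
	 */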
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	/*
	 * We've attempted a real mode hcall, but it has been punted back
	 * to userspace. We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
#endif
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#endif
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER

_GLOBAL(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	LOAD_REG_ADDR(r11, dawr_force_enable)

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
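	/*
	 * Roughly (a sketch): the rlwimi pair above repositions the
	 * DABR's read/write enable bits into DAWRX_DR/DAWRX_DW and its
	 * translate bit into DAWRX_WT, so the legacy DABR semantics
	 * are expressed as an equivalent DAWR/DAWRX match.
	 */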
	/*
	 * If we came in through the real-mode hcall handler then it is
	 * necessary to write the registers, since the return path won't.
	 * Otherwise it is sufficient to store them in the vcpu struct;
	 * they will be loaded the next time the vcpu is run.
	 */
	andi.	r6, r6, MSR_DR		/* in real mode? */
	mtspr	SPRN_DAWRX, r5
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
2578 lbz r6,HSTATE_PTID(r13)
2579 lwz r8,VCORE_ENTRY_EXIT(r5)
2583 addi r6,r5,VCORE_NAPPING_THREADS
2590 /* order napping_threads update vs testing entry_exit_map */
2593 stb r0,HSTATE_NAPPING(r13)
2594 lwz r7,VCORE_ENTRY_EXIT(r5)
2596 bge 33f /* another thread already exiting */
2599 * Although not specifically required by the architecture, POWER7
2600 * preserves the following registers in nap mode, even if an SMT mode
2601 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2602 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2604 /* Save non-volatile GPRs */
2605 std r14, VCPU_GPR(R14)(r3)
2606 std r15, VCPU_GPR(R15)(r3)
2607 std r16, VCPU_GPR(R16)(r3)
2608 std r17, VCPU_GPR(R17)(r3)
2609 std r18, VCPU_GPR(R18)(r3)
2610 std r19, VCPU_GPR(R19)(r3)
2611 std r20, VCPU_GPR(R20)(r3)
2612 std r21, VCPU_GPR(R21)(r3)
2613 std r22, VCPU_GPR(R22)(r3)
2614 std r23, VCPU_GPR(R23)(r3)
2615 std r24, VCPU_GPR(R24)(r3)
2616 std r25, VCPU_GPR(R25)(r3)
2617 std r26, VCPU_GPR(R26)(r3)
2618 std r27, VCPU_GPR(R27)(r3)
2619 std r28, VCPU_GPR(R28)(r3)
2620 std r29, VCPU_GPR(R29)(r3)
2621 std r30, VCPU_GPR(R30)(r3)
2622 std r31, VCPU_GPR(R31)(r3)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */

	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld	r1, HSTATE_HOST_R1(r13)
	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
	/*
	 * PSSCR bits:  exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	li	r3, PNV_THREAD_NAP
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
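	/*
	 * In other words (a sketch): PSSCR = PSSCR_EC | PSSCR_ESL with
	 * RL (requested level) = 0, i.e. wake only on the LPCR-selected
	 * events, allow full state loss and SMT mode switch, but
	 * request the shallowest stop level.
	 */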
	bl	isa300_idle_stop_mayloss
	bl	isa206_idle_insn_mayloss
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0, PACA_FTRACE_ENABLED(r13)

	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	lbz	r0, HSTATE_NAPPING(r13)
	cmpwi	r0, NAPPING_CEDE
	cmpwi	r0, NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup
	cmpwi	r0, NAPPING_UNSPLIT
	beq	kvm_unsplit_wakeup
	twi	31,0,0		/* Nap state must not be zero */
	/* Woken by external or decrementer interrupt */

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Branch around the call if both CPU_FTR_TM and
	 * CPU_FTR_P9_TM_HV_ASSIST are off.
	 */
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
#endif

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 *
	 * r3 tells us whether we need to return to host or not;
	 * WARNING: it gets checked further down, so do not modify
	 * r3 until that check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	b	maybe_reenter_guest
	/* cede when already previously prodded case */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* are we using XIVE with single escalation? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	li	r6, XIVE_ESB_SET_PQ_00
	/*
	 * If we still have a pending escalation, abort the cede,
	 * and we must set PQ to 10 rather than 00 so that we don't
	 * potentially end up with two entries for the escalation
	 * interrupt in the XIVE interrupt queue. In that case
	 * we also don't want to set xive_esc_on to 1 here in
	 * case we race with xive_esc_irq().
	 */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	stb	r0, VCPU_CEDED(r9)
	li	r6, XIVE_ESB_SET_PQ_10
	stb	r0, VCPU_XIVE_ESC_ON(r9)
	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */
5:	/* Enable XIVE escalation */
	andi.	r0, r0, MSR_DR		/* in real mode? */
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont
	/* Try to do machine check recovery in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	/* all machine checks go to virtual mode for further handling */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/*
	 * Call C code to handle an HMI in real mode.
	 * Only the primary thread does the call, secondary threads are handled
	 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
	 * r9 points to the vcpu on entry
	 */
	lbz	r0, HSTATE_PTID(r13)
	bl	kvmppc_realmode_hmi_handler
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HMI
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
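	/*
	 * The rlwinm above rotates the SRR1 wake-reason field down into
	 * the low bits of r6 (a 4-bit field on POWER8, 3 bits on POWER7,
	 * hence the 0xf vs 0xe mask), so the cmpwi tests below compare
	 * against the raw field values.
	 */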
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance ? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr
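/*
 * store_fp_state/store_vr_state above (and load_fp_state/load_vr_state
 * below) are the generic powerpc helpers that copy the entire FP/VSX
 * and VMX register files to or from an fp_state/vr_state area; here r3
 * points them at the corresponding save areas inside the vcpu struct.
 */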

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm
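	/*
	 * Background, roughly: on POWER9 chips needing TM assist,
	 * "fake suspend" means the guest is made to believe it is in TM
	 * suspended state while the hardware actually is not, so the
	 * real suspend-state work (such as the treclaim needed for the
	 * S->N transition) must be performed here by the hypervisor.
	 */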
	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * GPRs.
	 */

BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#else
	mfctr	r4
#endif
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)
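	/* 0x7265677368657265 is ASCII "regshere": a marker word that
	 * shows up in stack dumps to flag where registers were saved. */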

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	slbmte	r0, r0
	slbia
	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	isync
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
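
	/*
	 * In the sequence below, the bcl 20,31,.+4 / mflr pair is the
	 * usual PowerPC idiom for obtaining the current instruction
	 * address (a branch-and-link to the very next instruction).
	 * That real address is converted to its kernel linear-map form
	 * by setting the top two address bits, and SRR0/SRR1 plus
	 * RFI_TO_KERNEL then switch the MMU on, landing at label 9
	 * in virtual mode.
	 */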

	/*
	 * Turn on the MMU and jump to C code
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 *   r11 has the guest MSR value (in/out)
 *   r9 has a vcpu pointer (in)
 *   r0 is used as a scratch register
 */
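/*
 * MSR[TS] is a two-bit field: 00 = non-transactional, 01 = suspended,
 * 10 = transactional.  Interrupt delivery moves a transactional thread
 * to suspended state, hence the conversion of 2 (0b10) to 1 (0b01)
 * below; other TS values are re-inserted unchanged.
 */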
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2 /* Check if we are in transactional state..  */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
_GLOBAL(kvmhv_load_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
	mr	r4, r3
	mflr	r0
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
	blr

/*
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
_GLOBAL(kvmhv_load_host_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
	mflr	r0
	lbz	r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr

/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0		/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
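/*
 * PMC6 is primed below to 0x7fffffff so that a single additional count
 * makes it negative (bit 31 set), which raises the performance monitor
 * alert that writing MMCR0[PMAO] alone fails to produce.
 */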
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
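	/*
	 * The TAS_SEQCOUNT updates below follow the seqlock convention:
	 * bump the count to an odd value before touching the accumulator
	 * and back to even afterwards, with lwsync barriers in between.
	 * A reader, in C, does roughly:
	 *
	 *	do {
	 *		seq = acc->seqcount;
	 *		rmb();
	 *		total = acc->tb_total;	// and tb_min, tb_max
	 *		rmb();
	 *	} while (seq != acc->seqcount || (seq & 1));
	 */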
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif