2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright SUSE Linux Products GmbH 2009
17 * Authors: Alexander Graf <agraf@suse.de>
20 #include <asm/ppc_asm.h>
21 #include <asm/kvm_asm.h>
25 #include <asm/asm-offsets.h>
27 #ifdef CONFIG_PPC_BOOK3S_64
28 #include <asm/exception-64s.h>
31 /*****************************************************************************
33 * Real Mode handlers that need to be in low physical memory *
35 ****************************************************************************/
/*
 * FUNC(name): resolve a kernel C-function symbol for a cross-module
 * branch.  On 64-bit Book3S the call goes through the "."-prefixed
 * text entry point (presumably the ELFv1 function-descriptor
 * convention -- confirm against the kernel's ppc64 ABI headers);
 * on 32-bit the plain symbol is used directly.
 *
 * NOTE(review): the matching #endif for this #if/#elif pair is not
 * visible in this excerpt -- confirm it is present in the full file.
 */
37 #if defined(CONFIG_PPC_BOOK3S_64)
39 #define FUNC(name) GLUE(.,name)
41 #elif defined(CONFIG_PPC_BOOK3S_32)
43 #define FUNC(name) name
/*
 * INTERRUPT_TRAMPOLINE <intno>
 *
 * Emits the real-mode first-level trampoline for interrupt vector
 * <intno>.  On an exception it decides whether the CPU was running a
 * KVM guest (by checking the in-guest magic byte in the shadow vcpu)
 * and either bounces back to the regular Linux handler
 * (kvmppc_resume_<intno>) or falls through into the KVM exit path.
 *
 * Register use:
 *   r13 - scratch (saved in SPRG_SCRATCH0), then shadow-vcpu pointer
 *   r12 - scratch (saved in SPRG_SCRATCH1)
 */
45 .macro INTERRUPT_TRAMPOLINE intno
47 .global kvmppc_trampoline_\intno
48 kvmppc_trampoline_\intno:
50 mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
53 * First thing to do is to find out if we're coming
54 * from a KVM guest or a Linux process.
56 * To distinguish, we check a magic byte in the PACA/current
/* r13 = per-thread struct, then the shadow-vcpu pointer hanging off it */
58 mfspr r13, SPRN_SPRG_THREAD
59 lwz r13, THREAD_KVM_SVCPU(r13)
60 /* PPC32 can have a NULL pointer - let's check for that */
61 mtspr SPRN_SPRG_SCRATCH1, r12 /* Save r12 */
/*
 * NOTE(review): the actual NULL test (a cmpwi r13,0 / branch and the
 * local labels it targets) is not visible in this excerpt.  The next
 * three instructions are the "not KVM" path: undo the scratch saves
 * and resume the normal Linux handler.
 */
66 mfspr r12, SPRN_SPRG_SCRATCH1
67 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
68 b kvmppc_resume_\intno /* Get back original handler */
/*
 * KVM candidate path: stash r12's current contents and the saved r12
 * into the shadow-vcpu scratch slots.  NOTE(review): r12 presumably
 * holds CR here via an mfcr not visible in this excerpt -- the exit
 * path below documents HSTATE.SCRATCH1 = guest CR.
 */
71 stw r12, HSTATE_SCRATCH1(r13)
72 mfspr r12, SPRN_SPRG_SCRATCH1
73 stw r12, HSTATE_SCRATCH0(r13)
/* Were we actually executing guest code?  If not, back to Linux. */
74 lbz r12, HSTATE_IN_GUEST(r13)
75 cmpwi r12, KVM_GUEST_MODE_NONE
76 bne ..kvmppc_handler_hasmagic_\intno
77 /* No KVM guest? Then jump back to the Linux handler! */
78 lwz r12, HSTATE_SCRATCH1(r13)
81 /* Now we know we're handling a KVM guest */
82 ..kvmppc_handler_hasmagic_\intno:
84 /* Should we just skip the faulting instruction? */
85 cmpwi r12, KVM_GUEST_MODE_SKIP
86 beq kvmppc_handler_skip_ins
88 /* Let's store which interrupt we're handling */
/*
 * NOTE(review): the "li r12, \intno" that records the vector number,
 * and the closing .endm, are not visible in this excerpt.
 */
91 /* Jump into the SLB exit code that goes to the highmem handler */
92 b kvmppc_handler_trampoline_exit
/*
 * Instantiate one real-mode trampoline per Book3S interrupt vector
 * that KVM intercepts (reset, storage faults, external, decrementer,
 * syscall, trace, perfmon, Altivec, ...).
 */
96 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
97 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
98 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
99 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
100 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
101 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
102 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
103 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
104 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DECREMENTER
105 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
106 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
107 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
108 INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
/*
 * kvmppc_handler_skip_ins
 *
 * Emulation shortcut: resume the guest at the instruction AFTER the
 * one that faulted, instead of exiting to the highmem handler.
 * Entry state (documented by the original comment fragments below):
 */
111 * Bring us back to the faulting code, but skip the
112 * faulting instruction.
114 * This is a generic exit path from the interrupt
120 * R13 = Shadow VCPU (PACA)
121 * HSTATE.SCRATCH0 = guest R12
122 * HSTATE.SCRATCH1 = guest CR
123 * SPRG_SCRATCH0 = guest R13
126 kvmppc_handler_skip_ins:
128 /* Patch the IP to the next instruction */
/*
 * NOTE(review): the SRR0 advance (mfsrr0 / addi ,4 / mtsrr0) is not
 * visible in this excerpt.
 */
133 /* Clean up all state */
/* Reload guest CR and guest r12 from the shadow-vcpu scratch slots. */
134 lwz r12, HSTATE_SCRATCH1(r13)
/*
 * NOTE(review): an mtcr between these two loads, the final r13
 * restore from SPRG_SCRATCH0, and the terminating RFI are not
 * visible in this excerpt.
 */
136 PPC_LL r12, HSTATE_SCRATCH0(r13)
139 /* And get back into the code */
/*
 * kvmppc_entry_trampoline
 *
 * Drop address translation (clear MSR_IR|MSR_DR) and transfer to the
 * real-mode guest-entry trampoline.  Contract from the original
 * header fragments below: r4 = guest shadow MSR, MSR.EE must be 0.
 */
144 * Call kvmppc_handler_trampoline_enter in real mode
146 * On entry, r4 contains the guest shadow MSR
147 * MSR.EE has to be 0 when calling this function
149 _GLOBAL(kvmppc_entry_trampoline)
/* r7 = address of the real-mode enter trampoline */
151 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
/*
 * Build the MSR to run the trampoline under: current MSR with IR/DR
 * cleared.  NOTE(review): the mfmsr that loads r5 is not visible in
 * this excerpt.
 */
154 li r6, MSR_IR | MSR_DR
155 andc r6, r5, r6 /* Clear DR and IR in MSR value */
156 #ifdef CONFIG_PPC_BOOK3S_32
158 * Set EE in HOST_MSR so that it's enabled when we get into our
159 * C exit handler function. On 64-bit we delay enabling
160 * interrupts until we have finished transferring stuff
161 * to or from the PACA.
/*
 * NOTE(review): the ori r5,r5,MSR_EE, the closing #endif, and the
 * final mtsrr0/mtsrr1/RFI sequence are not visible in this excerpt.
 */
/*
 * Per-subarch helpers for the kvmppc_load_up_* wrappers below:
 *   STACK_LR      - stack-frame slot used to save/restore LR
 *   MSR_EXT_START - (32-bit only) switch to an MSR with DR and EE
 *                   cleared before calling load_up_xxx, keeping the
 *                   old MSR in r20
 *   MSR_EXT_END   - restore the previous MSR (r20) afterwards
 * On 64-bit, LR lives in the _LINK frame slot and no MSR fiddling is
 * needed, so MSR_EXT_START expands to nothing.
 *
 * NOTE(review): several continuation lines of these backslash-spliced
 * #defines (e.g. the mfmsr into r20, the mtmsr of the lowered MSR,
 * the 64-bit MSR_EXT_END and the closing #endif) are not visible in
 * this excerpt, so the definitions as shown are incomplete.  Comments
 * are deliberately kept outside the macro bodies to avoid altering
 * the line splicing.
 */
169 #if defined(CONFIG_PPC_BOOK3S_32)
170 #define STACK_LR INT_FRAME_SIZE+4
172 /* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
173 #define MSR_EXT_START \
174 PPC_STL r20, _NIP(r1); \
176 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
177 andc r3,r20,r3; /* Disable DR,EE */ \
181 #define MSR_EXT_END \
182 mtmsr r20; /* Enable DR,EE */ \
186 #elif defined(CONFIG_PPC_BOOK3S_64)
187 #define STACK_LR _LINK
188 #define MSR_EXT_START
/*
 * define_load_up(what)
 *
 * Emits kvmppc_load_up_<what>: a small wrapper around the kernel's
 * load_up_<what> routine (FPU/Altivec/VSX activation).  It opens a
 * stack frame, saves LR in the STACK_LR slot, calls load_up_<what>,
 * then restores LR and tears the frame down.
 *
 * NOTE(review): several continuation lines of this backslash-spliced
 * macro (mflr/mtlr, the MSR_EXT_START/MSR_EXT_END invocations and
 * the final blr) are not visible in this excerpt, so the definition
 * as shown is incomplete; no comments are inserted into the macro
 * body itself.
 */
193 * Activate current's external feature (FPU/Altivec/VSX)
195 #define define_load_up(what) \
197 _GLOBAL(kvmppc_load_up_ ## what); \
198 PPC_STLU r1, -INT_FRAME_SIZE(r1); \
200 PPC_STL r3, STACK_LR(r1); \
203 bl FUNC(load_up_ ## what); \
206 PPC_LL r3, STACK_LR(r1); \
208 addi r1, r1, INT_FRAME_SIZE; \
212 #ifdef CONFIG_ALTIVEC
213 define_load_up(altivec)
/*
 * Pull in the low-level segment/trampoline code -- presumably the
 * provider of kvmppc_handler_trampoline_enter/_exit referenced above;
 * confirm against book3s_segment.S.
 */
216 #include "book3s_segment.S"