/*
 * arch/powerpc/kvm/book3s_rmhandlers.S
 *
 * Real mode interrupt trampolines shared by 32 bit and 64 bit Book3S KVM.
 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
#endif
/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in low physical memory          *
 *                                                                           *
 ****************************************************************************/
36#if defined(CONFIG_PPC_BOOK3S_64)
37
38#define LOAD_SHADOW_VCPU(reg) \
39 mfspr reg, SPRN_SPRG_PACA
40
41#define SHADOW_VCPU_OFF PACA_KVM_SVCPU
42#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
43#define FUNC(name) GLUE(.,name)
44
45#elif defined(CONFIG_PPC_BOOK3S_32)
46
47#define LOAD_SHADOW_VCPU(reg) \
48 mfspr reg, SPRN_SPRG_THREAD; \
49 lwz reg, THREAD_KVM_SVCPU(reg); \
50 /* PPC32 can have a NULL pointer - let's check for that */ \
51 mtspr SPRN_SPRG_SCRATCH1, r12; /* Save r12 */ \
52 mfcr r12; \
53 cmpwi reg, 0; \
54 bne 1f; \
55 mfspr reg, SPRN_SPRG_SCRATCH0; \
56 mtcr r12; \
57 mfspr r12, SPRN_SPRG_SCRATCH1; \
58 b kvmppc_resume_\intno; \
591:; \
60 mtcr r12; \
61 mfspr r12, SPRN_SPRG_SCRATCH1; \
62 tophys(reg, reg)
63
64#define SHADOW_VCPU_OFF 0
65#define MSR_NOIRQ MSR_KERNEL
66#define FUNC(name) name
67
68#endif
c862125c
AG
69
70.macro INTERRUPT_TRAMPOLINE intno
71
72.global kvmppc_trampoline_\intno
73kvmppc_trampoline_\intno:
74
75 mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */
76
77 /*
78 * First thing to do is to find out if we're coming
79 * from a KVM guest or a Linux process.
80 *
8c3a4e0b 81 * To distinguish, we check a magic byte in the PACA/current
c862125c 82 */
8c3a4e0b
AG
83 LOAD_SHADOW_VCPU(r13)
84 PPC_STL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
c862125c 85 mfcr r12
8c3a4e0b
AG
86 stw r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
87 lbz r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
b4433a7c 88 cmpwi r12, KVM_GUEST_MODE_NONE
c862125c
AG
89 bne ..kvmppc_handler_hasmagic_\intno
90 /* No KVM guest? Then jump back to the Linux handler! */
8c3a4e0b 91 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
c862125c 92 mtcr r12
8c3a4e0b 93 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
c862125c
AG
94 mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
95 b kvmppc_resume_\intno /* Get back original handler */
96
97 /* Now we know we're handling a KVM guest */
98..kvmppc_handler_hasmagic_\intno:
b4433a7c
AG
99
100 /* Should we just skip the faulting instruction? */
101 cmpwi r12, KVM_GUEST_MODE_SKIP
102 beq kvmppc_handler_skip_ins
103
c862125c
AG
104 /* Let's store which interrupt we're handling */
105 li r12, \intno
106
107 /* Jump into the SLB exit code that goes to the highmem handler */
108 b kvmppc_handler_trampoline_exit
109
110.endm
111
112INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET
113INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK
114INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE
c862125c 115INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE
c862125c
AG
116INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL
117INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT
118INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM
119INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL
120INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DECREMENTER
121INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL
122INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE
123INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON
124INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
8c3a4e0b
AG
125
126/* Those are only available on 64 bit machines */
127
128#ifdef CONFIG_PPC_BOOK3S_64
129INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT
130INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT
c862125c 131INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
8c3a4e0b 132#endif
c862125c 133
b4433a7c
AG
134/*
135 * Bring us back to the faulting code, but skip the
136 * faulting instruction.
137 *
138 * This is a generic exit path from the interrupt
139 * trampolines above.
140 *
141 * Input Registers:
142 *
8c3a4e0b
AG
143 * R12 = free
144 * R13 = Shadow VCPU (PACA)
145 * SVCPU.SCRATCH0 = guest R12
146 * SVCPU.SCRATCH1 = guest CR
147 * SPRG_SCRATCH0 = guest R13
b4433a7c
AG
148 *
149 */
150kvmppc_handler_skip_ins:
151
152 /* Patch the IP to the next instruction */
153 mfsrr0 r12
154 addi r12, r12, 4
155 mtsrr0 r12
156
157 /* Clean up all state */
8c3a4e0b 158 lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
b4433a7c 159 mtcr r12
8c3a4e0b 160 PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
b4433a7c
AG
161 mfspr r13, SPRN_SPRG_SCRATCH0
162
163 /* And get back into the code */
164 RFI
165
c862125c
AG
166/*
167 * This trampoline brings us back to a real mode handler
168 *
169 * Input Registers:
170 *
7e57cba0
AG
171 * R5 = SRR0
172 * R6 = SRR1
c862125c
AG
173 * LR = real-mode IP
174 *
175 */
176.global kvmppc_handler_lowmem_trampoline
177kvmppc_handler_lowmem_trampoline:
178
7e57cba0
AG
179 mtsrr0 r5
180 mtsrr1 r6
c862125c
AG
181 blr
182kvmppc_handler_lowmem_trampoline_end:
183
021ec9c6
AG
184/*
185 * Call a function in real mode
186 *
187 * Input Registers:
188 *
189 * R3 = function
190 * R4 = MSR
8c3a4e0b 191 * R5 = scratch register
021ec9c6
AG
192 *
193 */
194_GLOBAL(kvmppc_rmcall)
8c3a4e0b
AG
195 LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
196 mtmsr r5 /* Disable relocation and interrupts, so mtsrr
021ec9c6 197 doesn't get interrupted */
8c3a4e0b 198 sync
021ec9c6
AG
199 mtsrr0 r3
200 mtsrr1 r4
201 RFI
202
8c3a4e0b
AG
203#if defined(CONFIG_PPC_BOOK3S_32)
204#define STACK_LR INT_FRAME_SIZE+4
205#elif defined(CONFIG_PPC_BOOK3S_64)
206#define STACK_LR _LINK
207#endif
208
d5e52813
AG
209/*
210 * Activate current's external feature (FPU/Altivec/VSX)
211 */
8c3a4e0b
AG
212#define define_load_up(what) \
213 \
214_GLOBAL(kvmppc_load_up_ ## what); \
215 PPC_STLU r1, -INT_FRAME_SIZE(r1); \
216 mflr r3; \
217 PPC_STL r3, STACK_LR(r1); \
218 PPC_STL r20, _NIP(r1); \
219 mfmsr r20; \
220 LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \
221 andc r3,r20,r3; /* Disable DR,EE */ \
222 mtmsr r3; \
223 sync; \
224 \
225 bl FUNC(load_up_ ## what); \
226 \
227 mtmsr r20; /* Enable DR,EE */ \
228 sync; \
229 PPC_LL r3, STACK_LR(r1); \
230 PPC_LL r20, _NIP(r1); \
231 mtlr r3; \
232 addi r1, r1, INT_FRAME_SIZE; \
d5e52813
AG
233 blr
234
235define_load_up(fpu)
236#ifdef CONFIG_ALTIVEC
237define_load_up(altivec)
238#endif
239#ifdef CONFIG_VSX
240define_load_up(vsx)
241#endif
242
c862125c
AG
243.global kvmppc_trampoline_lowmem
244kvmppc_trampoline_lowmem:
245 .long kvmppc_handler_lowmem_trampoline - _stext
246
247.global kvmppc_trampoline_enter
248kvmppc_trampoline_enter:
249 .long kvmppc_handler_trampoline_enter - _stext

#include "book3s_segment.S"