/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 *
 * Author: Varun Sethi <varun.sethi@freescale.com>
 * Author: Scott Wood <scottwood@freescale.com>
 * Author: Mihai Caraman <mihai.caraman@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>

#ifdef CONFIG_64BIT
#include <asm/exception-64e.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#else
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
#endif

#define LONGBYTES		(BITS_PER_LONG / 8)

#define VCPU_GUEST_SPRG(n)	(VCPU_GUEST_SPRGS + (n * LONGBYTES))

/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	PPC_LR_STKOFF
#define HOST_RUN	(HOST_CALLEE_LR + LONGBYTES)
/*
 * r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option.
 */
#define HOST_R2		(HOST_RUN + LONGBYTES)
#define HOST_CR		(HOST_R2 + LONGBYTES)
#define HOST_NV_GPRS	(HOST_CR + LONGBYTES)
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
#define HOST_NV_GPR(n)	__HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES)
#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
/* LR in caller stack frame. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + PPC_LR_STKOFF)
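/*
 * Resulting frame, offsets from r1 (low to high): back chain (HOST_R1),
 * callee LR save, the kvm_run pointer, saved r2, saved CR, then host
 * r14-r31, padded to a 16-byte boundary; our LR is saved at
 * PPC_LR_STKOFF in the caller's frame, above HOST_STACK_SIZE.
 */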

#define NEED_EMU		0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR		0x00000002 /* save faulting DEAR */
#define NEED_ESR		0x00000004 /* save faulting ESR */

/*
 * On entry:
 * r4 = vcpu, r5 = srr0, r6 = srr1
 * saved in vcpu: cr, ctr, r3-r13
 */
.macro kvm_handler_common intno, srr0, flags
	/* Restore host stack pointer */
	PPC_STL	r1, VCPU_GPR(R1)(r4)
	PPC_STL	r2, VCPU_GPR(R2)(r4)
	PPC_LL	r1, VCPU_HOST_STACK(r4)
	PPC_LL	r2, HOST_R2(r1)

	mfspr	r10, SPRN_PID
	lwz	r8, VCPU_HOST_PID(r4)
	PPC_LL	r11, VCPU_SHARED(r4)
	PPC_STL	r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
	li	r14, \intno

	stw	r10, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r8

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
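	/*
	 * Read TBU, then TBL, then TBU again; if the two TBU reads
	 * differ, the timebase carried between them, so retry.  This
	 * gives a consistent 64-bit timestamp from two 32-bit reads.
	 */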
1:	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	bne-	1b
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	oris	r8, r6, MSR_CE@h
	PPC_STD(r6, VCPU_SHARED_MSR, r11)
	ori	r8, r8, MSR_ME | MSR_RI
	PPC_STL	r5, VCPU_PC(r4)

	/*
	 * Make sure CE/ME/RI are set (if appropriate for the exception type)
	 * whether or not the guest had them set.  Since mfmsr/mtmsr are
	 * somewhat expensive, skip this in the common case where the guest
	 * had all these bits set (and thus they're still set if
	 * appropriate for the exception type).
	 */
	cmpw	r6, r8
	beq	1f
	mfmsr	r7
	.if	\srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
	oris	r7, r7, MSR_CE@h
	.endif
	.if	\srr0 != SPRN_MCSRR0
	ori	r7, r7, MSR_ME | MSR_RI
	.endif
	mtmsr	r7
1:

	.if	\flags & NEED_EMU
	PPC_STL	r15, VCPU_GPR(R15)(r4)
	PPC_STL	r16, VCPU_GPR(R16)(r4)
	PPC_STL	r17, VCPU_GPR(R17)(r4)
	PPC_STL	r18, VCPU_GPR(R18)(r4)
	PPC_STL	r19, VCPU_GPR(R19)(r4)
	PPC_STL	r20, VCPU_GPR(R20)(r4)
	PPC_STL	r21, VCPU_GPR(R21)(r4)
	PPC_STL	r22, VCPU_GPR(R22)(r4)
	PPC_STL	r23, VCPU_GPR(R23)(r4)
	PPC_STL	r24, VCPU_GPR(R24)(r4)
	PPC_STL	r25, VCPU_GPR(R25)(r4)
	PPC_STL	r26, VCPU_GPR(R26)(r4)
	PPC_STL	r27, VCPU_GPR(R27)(r4)
	PPC_STL	r28, VCPU_GPR(R28)(r4)
	PPC_STL	r29, VCPU_GPR(R29)(r4)
	PPC_STL	r30, VCPU_GPR(R30)(r4)
	PPC_STL	r31, VCPU_GPR(R31)(r4)

	/*
	 * We don't use external PID support.  lwepx faults would need to be
	 * handled by KVM, and this implies additional code in DO_KVM (for
	 * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS], which
	 * is too intrusive for the host.  Get the last instruction in
	 * kvmppc_get_last_inst().
	 */
	li	r9, KVM_INST_FETCH_FAILED
	stw	r9, VCPU_LAST_INST(r4)
	.endif

	.if	\flags & NEED_ESR
	mfspr	r8, SPRN_ESR
	PPC_STL	r8, VCPU_FAULT_ESR(r4)
	.endif

	.if	\flags & NEED_DEAR
	mfspr	r9, SPRN_DEAR
	PPC_STL	r9, VCPU_FAULT_DEAR(r4)
	.endif

	b	kvmppc_resume_host
.endm

#ifdef CONFIG_64BIT
/* Exception types */
#define EX_GEN			1
#define EX_GDBELL		2
#define EX_DBG			3
#define EX_MC			4
#define EX_CRIT			5
#define EX_TLB			6

/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	mr	r11, r4
	/*
	 * Get vcpu from Paca: paca->__current.thread->kvm_vcpu
	 */
	PPC_LL	r4, PACACURRENT(r13)
	PPC_LL	r4, (THREAD + THREAD_KVM_VCPU)(r4)
	stw	r10, VCPU_CR(r4)
	PPC_STL	r11, VCPU_GPR(R4)(r4)
	PPC_STL	r5, VCPU_GPR(R5)(r4)
	PPC_STL	r6, VCPU_GPR(R6)(r4)
	PPC_STL	r8, VCPU_GPR(R8)(r4)
	PPC_STL	r9, VCPU_GPR(R9)(r4)
	.if \type == EX_TLB
	PPC_LL	r5, EX_TLB_R13(r12)
	PPC_LL	r6, EX_TLB_R10(r12)
	PPC_LL	r8, EX_TLB_R11(r12)
	mfspr	r12, \scratch
	.else
	mfspr	r5, \scratch
	PPC_LL	r6, (\paca_ex + \ex_r10)(r13)
	PPC_LL	r8, (\paca_ex + \ex_r11)(r13)
	.endif
	PPC_STL	r5, VCPU_GPR(R13)(r4)
	PPC_STL	r3, VCPU_GPR(R3)(r4)
	PPC_STL	r7, VCPU_GPR(R7)(r4)
	PPC_STL	r12, VCPU_GPR(R12)(r4)
	PPC_STL	r6, VCPU_GPR(R10)(r4)
	PPC_STL	r8, VCPU_GPR(R11)(r4)
	mfctr	r5
	PPC_STL	r5, VCPU_CTR(r4)
	mfspr	r5, \srr0
	mfspr	r6, \srr1
	kvm_handler_common \intno, \srr0, \flags
.endm

#define EX_PARAMS(type) \
	EX_##type, \
	SPRN_SPRG_##type##_SCRATCH, \
	PACA_EX##type, \
	EX_R10, \
	EX_R11

#define EX_PARAMS_TLB \
	EX_TLB, \
	SPRN_SPRG_GEN_SCRATCH, \
	PACA_EXTLB, \
	EX_TLB_R10, \
	EX_TLB_R11

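/*
 * For example, EX_PARAMS(GEN) pastes into
 *   EX_GEN, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN, EX_R10, EX_R11
 * selecting the scratch SPRG and PACA exception save area that the
 * host exception prolog used for this exception class.
 */
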
kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \
	SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
/*
 * Only bolted TLB miss exception handlers are supported for now
 */
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \
	SPRN_GSRR0, SPRN_GSRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
	SPRN_DSRR0, SPRN_DSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
#else
/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	PPC_LL	r11, THREAD_KVM_VCPU(r10)
	PPC_STL	r3, VCPU_GPR(R3)(r11)
	mfspr	r3, SPRN_SPRG_RSCRATCH0
	PPC_STL	r4, VCPU_GPR(R4)(r11)
	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
	PPC_STL	r5, VCPU_GPR(R5)(r11)
	stw	r13, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(R10)(r11)
	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
	PPC_STL	r6, VCPU_GPR(R6)(r11)
	PPC_STL	r4, VCPU_GPR(R11)(r11)
	mfspr	r6, \srr1
	PPC_STL	r7, VCPU_GPR(R7)(r11)
	PPC_STL	r8, VCPU_GPR(R8)(r11)
	PPC_STL	r9, VCPU_GPR(R9)(r11)
	PPC_STL	r3, VCPU_GPR(R13)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(R12)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm

.macro kvm_lvl_handler intno scratch srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	mfspr	r10, SPRN_SPRG_THREAD
	PPC_LL	r11, THREAD_KVM_VCPU(r10)
	PPC_STL	r3, VCPU_GPR(R3)(r11)
	mfspr	r3, \scratch
	PPC_STL	r4, VCPU_GPR(R4)(r11)
	PPC_LL	r4, GPR9(r8)
	PPC_STL	r5, VCPU_GPR(R5)(r11)
	stw	r9, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(R8)(r11)
	PPC_LL	r3, GPR10(r8)
	PPC_STL	r6, VCPU_GPR(R6)(r11)
	PPC_STL	r4, VCPU_GPR(R9)(r11)
	mfspr	r6, \srr1
	PPC_LL	r4, GPR11(r8)
	PPC_STL	r7, VCPU_GPR(R7)(r11)
	PPC_STL	r3, VCPU_GPR(R10)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(R12)(r11)
	PPC_STL	r13, VCPU_GPR(R13)(r11)
	PPC_STL	r4, VCPU_GPR(R11)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm

kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
	SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
	SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
#endif

/* Registers:
 *  SPRG_SCRATCH0: guest r10
 *  r4: vcpu pointer
 *  r11: vcpu->arch.shared
 *  r14: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	/* Save remaining volatile guest register state to vcpu. */
	mfspr	r3, SPRN_VRSAVE
	PPC_STL	r0, VCPU_GPR(R0)(r4)
	mflr	r5
	mfspr	r6, SPRN_SPRG4
	PPC_STL	r5, VCPU_LR(r4)
	mfspr	r7, SPRN_SPRG5
	stw	r3, VCPU_VRSAVE(r4)
#ifdef CONFIG_64BIT
	PPC_LL	r3, PACA_SPRG_VDSO(r13)
#endif
	mfspr	r5, SPRN_SPRG9
	PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
	mfspr	r8, SPRN_SPRG6
	PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
	mfspr	r9, SPRN_SPRG7
#ifdef CONFIG_64BIT
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif
	PPC_STD(r5, VCPU_SPRG9, r4)
	PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
	mfxer	r3
	PPC_STD(r9, VCPU_SHARED_SPRG7, r11)

	/* save guest MAS registers and restore host mas4 & mas6 */
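	/*
	 * shared->mas7_3 is a single 64-bit field; in this (big-endian)
	 * layout MAS7 lives in the high word at offset +0 and MAS3 in
	 * the low word at offset +4, hence the split accesses below.
	 */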
	mfspr	r5, SPRN_MAS0
	PPC_STL	r3, VCPU_XER(r4)
	mfspr	r6, SPRN_MAS1
	stw	r5, VCPU_SHARED_MAS0(r11)
	mfspr	r7, SPRN_MAS2
	stw	r6, VCPU_SHARED_MAS1(r11)
	PPC_STD(r7, VCPU_SHARED_MAS2, r11)
	mfspr	r5, SPRN_MAS3
	mfspr	r6, SPRN_MAS4
	stw	r5, VCPU_SHARED_MAS7_3+4(r11)
	mfspr	r7, SPRN_MAS6
	stw	r6, VCPU_SHARED_MAS4(r11)
	mfspr	r5, SPRN_MAS7
	lwz	r6, VCPU_HOST_MAS4(r4)
	stw	r7, VCPU_SHARED_MAS6(r11)
	lwz	r8, VCPU_HOST_MAS6(r4)
	mtspr	SPRN_MAS4, r6
	stw	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r8
	/* Enable MAS register updates via exception */
	mfspr	r3, SPRN_EPCR
	rlwinm	r3, r3, 0, ~SPRN_EPCR_DMIUH
	mtspr	SPRN_EPCR, r3
	isync

#ifdef CONFIG_64BIT
	/*
	 * We enter with interrupts disabled in hardware, but
	 * we need to call RECONCILE_IRQ_STATE to ensure
	 * that the software state is kept in sync.
	 */
	RECONCILE_IRQ_STATE(r3,r5)
#endif

	/* Switch to kernel stack and jump to handler. */
	PPC_LL	r3, HOST_RUN(r1)
	mr	r5, r14 /* intno */
	mr	r14, r4 /* Save vcpu pointer. */
	bl	kvmppc_handle_exit

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	PPC_LL	r14, VCPU_GPR(R14)(r4)

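	/*
	 * r3 holds the return code from kvmppc_handle_exit():
	 * RESUME_FLAG_NV (bit 0) requests a reload of the guest
	 * nonvolatiles, RESUME_FLAG_HOST (bit 1) a heavyweight exit,
	 * and the bits above those carry a negative error code.
	 */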
	andi.	r5, r3, RESUME_FLAG_NV
	beq	skip_nv_load
	PPC_LL	r15, VCPU_GPR(R15)(r4)
	PPC_LL	r16, VCPU_GPR(R16)(r4)
	PPC_LL	r17, VCPU_GPR(R17)(r4)
	PPC_LL	r18, VCPU_GPR(R18)(r4)
	PPC_LL	r19, VCPU_GPR(R19)(r4)
	PPC_LL	r20, VCPU_GPR(R20)(r4)
	PPC_LL	r21, VCPU_GPR(R21)(r4)
	PPC_LL	r22, VCPU_GPR(R22)(r4)
	PPC_LL	r23, VCPU_GPR(R23)(r4)
	PPC_LL	r24, VCPU_GPR(R24)(r4)
	PPC_LL	r25, VCPU_GPR(R25)(r4)
	PPC_LL	r26, VCPU_GPR(R26)(r4)
	PPC_LL	r27, VCPU_GPR(R27)(r4)
	PPC_LL	r28, VCPU_GPR(R28)(r4)
	PPC_LL	r29, VCPU_GPR(R29)(r4)
	PPC_LL	r30, VCPU_GPR(R30)(r4)
	PPC_LL	r31, VCPU_GPR(R31)(r4)
skip_nv_load:
	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */
	PPC_LL	r5, HOST_STACK_LR(r1)
	lwz	r6, HOST_CR(r1)

	/*
	 * We already saved guest volatile register state; now save the
	 * non-volatiles.
	 */

	PPC_STL	r15, VCPU_GPR(R15)(r4)
	PPC_STL	r16, VCPU_GPR(R16)(r4)
	PPC_STL	r17, VCPU_GPR(R17)(r4)
	PPC_STL	r18, VCPU_GPR(R18)(r4)
	PPC_STL	r19, VCPU_GPR(R19)(r4)
	PPC_STL	r20, VCPU_GPR(R20)(r4)
	PPC_STL	r21, VCPU_GPR(R21)(r4)
	PPC_STL	r22, VCPU_GPR(R22)(r4)
	PPC_STL	r23, VCPU_GPR(R23)(r4)
	PPC_STL	r24, VCPU_GPR(R24)(r4)
	PPC_STL	r25, VCPU_GPR(R25)(r4)
	PPC_STL	r26, VCPU_GPR(R26)(r4)
	PPC_STL	r27, VCPU_GPR(R27)(r4)
	PPC_STL	r28, VCPU_GPR(R28)(r4)
	PPC_STL	r29, VCPU_GPR(R29)(r4)
	PPC_STL	r30, VCPU_GPR(R30)(r4)
	PPC_STL	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	PPC_LL	r14, HOST_NV_GPR(R14)(r1)
	PPC_LL	r15, HOST_NV_GPR(R15)(r1)
	PPC_LL	r16, HOST_NV_GPR(R16)(r1)
	PPC_LL	r17, HOST_NV_GPR(R17)(r1)
	PPC_LL	r18, HOST_NV_GPR(R18)(r1)
	PPC_LL	r19, HOST_NV_GPR(R19)(r1)
	PPC_LL	r20, HOST_NV_GPR(R20)(r1)
	PPC_LL	r21, HOST_NV_GPR(R21)(r1)
	PPC_LL	r22, HOST_NV_GPR(R22)(r1)
	PPC_LL	r23, HOST_NV_GPR(R23)(r1)
	PPC_LL	r24, HOST_NV_GPR(R24)(r1)
	PPC_LL	r25, HOST_NV_GPR(R25)(r1)
	PPC_LL	r26, HOST_NV_GPR(R26)(r1)
	PPC_LL	r27, HOST_NV_GPR(R27)(r1)
	PPC_LL	r28, HOST_NV_GPR(R28)(r1)
	PPC_LL	r29, HOST_NV_GPR(R29)(r1)
	PPC_LL	r30, HOST_NV_GPR(R30)(r1)
	PPC_LL	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	mtlr	r5
	mtcr	r6
	addi	r1, r1, HOST_STACK_SIZE
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	PPC_STL	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	PPC_STL	r3, HOST_RUN(r1)
	mflr	r3
	mfcr	r5
	PPC_STL	r3, HOST_STACK_LR(r1)

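	/* CR fields CR2-CR4 are nonvolatile in the PPC ABI, so CR must survive the run. */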
	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	PPC_STL	r14, HOST_NV_GPR(R14)(r1)
	PPC_STL	r15, HOST_NV_GPR(R15)(r1)
	PPC_STL	r16, HOST_NV_GPR(R16)(r1)
	PPC_STL	r17, HOST_NV_GPR(R17)(r1)
	PPC_STL	r18, HOST_NV_GPR(R18)(r1)
	PPC_STL	r19, HOST_NV_GPR(R19)(r1)
	PPC_STL	r20, HOST_NV_GPR(R20)(r1)
	PPC_STL	r21, HOST_NV_GPR(R21)(r1)
	PPC_STL	r22, HOST_NV_GPR(R22)(r1)
	PPC_STL	r23, HOST_NV_GPR(R23)(r1)
	PPC_STL	r24, HOST_NV_GPR(R24)(r1)
	PPC_STL	r25, HOST_NV_GPR(R25)(r1)
	PPC_STL	r26, HOST_NV_GPR(R26)(r1)
	PPC_STL	r27, HOST_NV_GPR(R27)(r1)
	PPC_STL	r28, HOST_NV_GPR(R28)(r1)
	PPC_STL	r29, HOST_NV_GPR(R29)(r1)
	PPC_STL	r30, HOST_NV_GPR(R30)(r1)
	PPC_STL	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	PPC_LL	r14, VCPU_GPR(R14)(r4)
	PPC_LL	r15, VCPU_GPR(R15)(r4)
	PPC_LL	r16, VCPU_GPR(R16)(r4)
	PPC_LL	r17, VCPU_GPR(R17)(r4)
	PPC_LL	r18, VCPU_GPR(R18)(r4)
	PPC_LL	r19, VCPU_GPR(R19)(r4)
	PPC_LL	r20, VCPU_GPR(R20)(r4)
	PPC_LL	r21, VCPU_GPR(R21)(r4)
	PPC_LL	r22, VCPU_GPR(R22)(r4)
	PPC_LL	r23, VCPU_GPR(R23)(r4)
	PPC_LL	r24, VCPU_GPR(R24)(r4)
	PPC_LL	r25, VCPU_GPR(R25)(r4)
	PPC_LL	r26, VCPU_GPR(R26)(r4)
	PPC_LL	r27, VCPU_GPR(R27)(r4)
	PPC_LL	r28, VCPU_GPR(R28)(r4)
	PPC_LL	r29, VCPU_GPR(R29)(r4)
	PPC_LL	r30, VCPU_GPR(R30)(r4)
	PPC_LL	r31, VCPU_GPR(R31)(r4)

lightweight_exit:
	PPC_STL	r2, HOST_R2(r1)

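	/* Switch address space: save the host PID and install the guest's PID. */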
	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r3

	PPC_LL	r11, VCPU_SHARED(r4)
	/* Disable MAS register updates via exception */
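	/*
	 * Setting EPCR[DMIUH] stops TLB exceptions taken in hypervisor
	 * state from clobbering the MAS registers, so the guest values
	 * loaded below survive until we enter the guest.
	 */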
	mfspr	r3, SPRN_EPCR
	oris	r3, r3, SPRN_EPCR_DMIUH@h
	mtspr	SPRN_EPCR, r3
	isync
	/* Save host mas4 and mas6 and load guest MAS registers */
	mfspr	r3, SPRN_MAS4
	stw	r3, VCPU_HOST_MAS4(r4)
	mfspr	r3, SPRN_MAS6
	stw	r3, VCPU_HOST_MAS6(r4)
	lwz	r3, VCPU_SHARED_MAS0(r11)
	lwz	r5, VCPU_SHARED_MAS1(r11)
	PPC_LD(r6, VCPU_SHARED_MAS2, r11)
	lwz	r7, VCPU_SHARED_MAS7_3+4(r11)
	lwz	r8, VCPU_SHARED_MAS4(r11)
	mtspr	SPRN_MAS0, r3
	mtspr	SPRN_MAS1, r5
	mtspr	SPRN_MAS2, r6
	mtspr	SPRN_MAS3, r7
	mtspr	SPRN_MAS4, r8
	lwz	r3, VCPU_SHARED_MAS6(r11)
	lwz	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r3
	mtspr	SPRN_MAS7, r5

	/*
	 * Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values.
	 */
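	/* The SPRN_SPRG4W-style names below are the supervisor write aliases of the user-readable SPRG4-7. */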
	lwz	r3, VCPU_VRSAVE(r4)
	PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
	mtspr	SPRN_VRSAVE, r3
	PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
	mtspr	SPRN_SPRG4W, r5
	PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
	mtspr	SPRN_SPRG5W, r6
	PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
	mtspr	SPRN_SPRG6W, r7
	PPC_LD(r5, VCPU_SPRG9, r4)
	mtspr	SPRN_SPRG7W, r8
	mtspr	SPRN_SPRG9, r5

	/* Load some guest volatiles. */
	PPC_LL	r3, VCPU_LR(r4)
	PPC_LL	r5, VCPU_XER(r4)
	PPC_LL	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_CR(r4)
	PPC_LL	r8, VCPU_PC(r4)
	PPC_LD(r9, VCPU_SHARED_MSR, r11)
	PPC_LL	r0, VCPU_GPR(R0)(r4)
	PPC_LL	r1, VCPU_GPR(R1)(r4)
	PPC_LL	r2, VCPU_GPR(R2)(r4)
	PPC_LL	r10, VCPU_GPR(R10)(r4)
	PPC_LL	r11, VCPU_GPR(R11)(r4)
	PPC_LL	r12, VCPU_GPR(R12)(r4)
	PPC_LL	r13, VCPU_GPR(R13)(r4)
	mtlr	r3
	mtxer	r5
	mtctr	r6
	mtsrr0	r8
	mtsrr1	r9

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r9, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	stw	r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
	bne	1b
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/*
	 * CR now takes the guest's value; don't execute any instruction
	 * that can change CR after the mtcr below.
	 */
	mtcr	r7

	/* Finish loading guest volatiles and jump to guest. */
	PPC_LL	r5, VCPU_GPR(R5)(r4)
	PPC_LL	r6, VCPU_GPR(R6)(r4)
	PPC_LL	r7, VCPU_GPR(R7)(r4)
	PPC_LL	r8, VCPU_GPR(R8)(r4)
	PPC_LL	r9, VCPU_GPR(R9)(r4)

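	/* r3 and r4 are loaded last: r4 is the vcpu pointer all these loads index through. */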
	PPC_LL	r3, VCPU_GPR(R3)(r4)
	PPC_LL	r4, VCPU_GPR(R4)(r4)
	rfi