powerpc/64s/exception: clean up system call entry
[linux-2.6-block.git] / arch / powerpc / kernel / exceptions-64s.S
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
0ebc4cda
BH
2/*
3 * This file contains the 64-bit "server" PowerPC variant
4 * of the low level exception handling including exception
5 * vectors, exception return, part of the slb and stab
6 * handling and other fixed offset specific things.
7 *
8 * This file is meant to be #included from head_64.S due to
25985edc 9 * position dependent assembly.
0ebc4cda
BH
10 *
11 * Most of this originates from head_64.S and thus has the same
12 * copyright history.
13 *
14 */
15
7230c564 16#include <asm/hw_irq.h>
8aa34ab8 17#include <asm/exception-64s.h>
46f52210 18#include <asm/ptrace.h>
7cba160a 19#include <asm/cpuidle.h>
da2bc464 20#include <asm/head-64.h>
2c86cd18 21#include <asm/feature-fixups.h>
890274c2 22#include <asm/kup.h>
8aa34ab8 23
15820091
NP
/*
 * Byte offsets of the registers stashed in each PACA exception save
 * area by the prolog macros below.  Note EX_DSISR and EX_CCR are
 * 4-byte values packed into a single doubleword (48 and 52).
 */
24/* PACA save area offsets (exgen, exmc, etc) */
25#define EX_R9 0
26#define EX_R10 8
27#define EX_R11 16
28#define EX_R12 24
29#define EX_R13 32
30#define EX_DAR 40
31#define EX_DSISR 48
32#define EX_CCR 52
33#define EX_CFAR 56
34#define EX_PPR 64
35#if defined(CONFIG_RELOCATABLE)
36#define EX_CTR 72
/* EX_SIZE (asm/exception-64s.h) is counted in doublewords: 10 * 8 = 80 bytes */
37.if EX_SIZE != 10
38 .error "EX_SIZE is wrong"
39.endif
40#else
/* No EX_CTR slot needed: 9 doublewords */
41.if EX_SIZE != 9
42 .error "EX_SIZE is wrong"
43.endif
44#endif
45
12a04809
NP
46/*
47 * We're short on space and time in the exception prolog, so we can't
48 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
49 * Instead we get the base of the kernel from paca->kernelbase and or in the low
50 * part of label. This requires that the label be within 64KB of kernelbase, and
51 * that kernelbase be 64K aligned.
52 */
53#define LOAD_HANDLER(reg, label) \
54 ld reg,PACAKBASE(r13); /* get high part of &label */ \
55 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
56
/*
 * Same trick, but taking the low 16 bits of the label's absolute
 * address via ABS_ADDR() rather than FIXED_SYMBOL_ABS_ADDR() -- see
 * asm/head-64.h for the distinction between the two.
 */
57#define __LOAD_HANDLER(reg, label) \
58 ld reg,PACAKBASE(r13); \
59 ori reg,reg,(ABS_ADDR(label))@l
60
61/*
62 * Branches from unrelocated code (e.g., interrupts) to labels outside
63 * head-y require >64K offsets.
 *
 * The addis of the @h (high 16) bits extends the reach beyond the 64KB
 * limit of the plain variants, to anywhere in the low 4GB.
64 */
65#define __LOAD_FAR_HANDLER(reg, label) \
66 ld reg,PACAKBASE(r13); \
67 ori reg,reg,(ABS_ADDR(label))@l; \
68 addis reg,reg,(ABS_ADDR(label))@h
69
70/* Exception register prefixes */
/* Passed as the "hsrr" argument of the prolog macros to select the
 * register pair used for the interrupt: HSRR0/1 (hrfid) vs SRR0/1 (rfid). */
71#define EXC_HV 1
72#define EXC_STD 0
73
74#if defined(CONFIG_RELOCATABLE)
75/*
76 * If we support interrupts with relocation on AND we're a relocatable kernel,
77 * we need to use CTR to get to the 2nd level handler. So, save/restore it
78 * when required.
79 */
80#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
81#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
82#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
83#else
84/* ...else CTR is unused and in register. */
85#define SAVE_CTR(reg, area)
/* GET_CTR still reads the live CTR so callers can save it to pt_regs */
86#define GET_CTR(reg, area) mfctr reg
87#define RESTORE_CTR(reg, area)
88#endif
89
90/*
91 * PPR save/restore macros used in exceptions-64s.S
92 * Used for P7 or later processors
 *
 * All of these expand inside feature sections, so the code is patched
 * out at boot when the CPU does not have the given feature bit.
93 */
94#define SAVE_PPR(area, ra) \
95BEGIN_FTR_SECTION_NESTED(940) \
96 ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
97 std ra,_PPR(r1); \
98END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
99
/* Restore PPR directly from the PACA save area (not from pt_regs) */
100#define RESTORE_PPR_PACA(area, ra) \
101BEGIN_FTR_SECTION_NESTED(941) \
102 ld ra,area+EX_PPR(r13); \
103 mtspr SPRN_PPR,ra; \
104END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
105
106/*
107 * Get an SPR into a register if the CPU has the given feature
108 */
109#define OPT_GET_SPR(ra, spr, ftr) \
110BEGIN_FTR_SECTION_NESTED(943) \
111 mfspr ra,spr; \
112END_FTR_SECTION_NESTED(ftr,ftr,943)
113
114/*
115 * Set an SPR from a register if the CPU has the given feature
116 */
117#define OPT_SET_SPR(ra, spr, ftr) \
118BEGIN_FTR_SECTION_NESTED(943) \
119 mtspr spr,ra; \
120END_FTR_SECTION_NESTED(ftr,ftr,943)
121
122/*
123 * Save a register to the PACA if the CPU has the given feature
124 */
125#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
126BEGIN_FTR_SECTION_NESTED(943) \
127 std ra,offset(r13); \
128END_FTR_SECTION_NESTED(ftr,ftr,943)
129
/*
 * EXCEPTION_PROLOG_0: first stage of the common interrupt prolog.
 * Callers have already done SET_SCRATCH0(r13); this loads the PACA
 * pointer into r13 and frees r9/r10 by saving them to the given PACA
 * save area.  PPR and CFAR are sampled into r9/r10 immediately --
 * CFAR is updated by every taken branch, so it must be read before
 * any branching -- and are stored by EXCEPTION_PROLOG_1.
 */
130.macro EXCEPTION_PROLOG_0 area
131 GET_PACA(r13)
132 std r9,\area\()+EX_R9(r13) /* save r9 */
133 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
134 HMT_MEDIUM /* set medium SMT thread priority */
135 std r10,\area\()+EX_R10(r13) /* save r10 - r12 */
136 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
137.endm
138
/*
 * EXCEPTION_PROLOG_1: second stage, run after EXCEPTION_PROLOG_0.
 * Arguments:
 *   hsrr    - EXC_HV if this vector uses HSRR0/1, EXC_STD for SRR0/1
 *   area    - PACA save area (PACA_EXGEN, PACA_EXMC, ...)
 *   kvm     - nonzero to run KVMTEST and divert to the KVM trampoline
 *   vec     - vector address; selects the KVM trampoline name and the
 *             PACA_IRQ_* reason for maskable vectors
 *   bitmask - soft-mask bits to test against PACAIRQSOFTMASK;
 *             0 for non-maskable vectors
 * On exit: r9 = saved CR, original r9-r13 (plus PPR/CFAR/CTR as CPU
 * features dictate) are in the save area.  If the interrupt is
 * soft-masked, r10 holds the PACA_IRQ_* reason and control diverts
 * to masked_interrupt/masked_Hinterrupt instead.
 */
139.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, bitmask
140 OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
141 OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
142 INTERRUPT_TO_KERNEL
143 SAVE_CTR(r10, \area\())
144 mfcr r9
145 .if \kvm
146 KVMTEST \hsrr \vec
147 .endif
148 .if \bitmask
149 lbz r10,PACAIRQSOFTMASK(r13)
150 andi. r10,r10,\bitmask
151 /* Associate vector numbers with bits in paca->irq_happened */
152 .if \vec == 0x500 || \vec == 0xea0
153 li r10,PACA_IRQ_EE
154 .elseif \vec == 0x900
155 li r10,PACA_IRQ_DEC
156 .elseif \vec == 0xa00 || \vec == 0xe80
157 li r10,PACA_IRQ_DBELL
158 .elseif \vec == 0xe60
159 li r10,PACA_IRQ_HMI
160 .elseif \vec == 0xf00
161 li r10,PACA_IRQ_PMI
162 .else
163 .abort "Bad maskable vector"
164 .endif
165
166 .if \hsrr
167 bne masked_Hinterrupt
168 .else
169 bne masked_interrupt
170 .endif
171 .endif
172
173 std r11,\area\()+EX_R11(r13)
174 std r12,\area\()+EX_R12(r13)
175 GET_SCRATCH0(r10) /* original r13, stashed at the vector entry */
176 std r10,\area\()+EX_R13(r13)
177.endm
178
/*
 * EXCEPTION_PROLOG_2_REAL: finish a real-mode prolog by (h)rfid-ing
 * to \label with the kernel MSR from PACAKMSR.  On arrival at the
 * handler r11 holds the interrupted NIP ((H)SRR0) and r12 the
 * interrupted MSR ((H)SRR1).  With \set_ri == 0 the handler runs with
 * MSR_RI clear (note the xori assumes PACAKMSR has MSR_RI set).
 */
179.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri
180 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
181 .if ! \set_ri
182 xori r10,r10,MSR_RI /* Clear MSR_RI */
183 .endif
184 .if \hsrr
185 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
186 .else
187 mfspr r11,SPRN_SRR0 /* save SRR0 */
188 .endif
189 LOAD_HANDLER(r12, \label\())
190 .if \hsrr
191 mtspr SPRN_HSRR0,r12
192 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
193 mtspr SPRN_HSRR1,r10
194 HRFI_TO_KERNEL
195 .else
196 mtspr SPRN_SRR0,r12
197 mfspr r12,SPRN_SRR1 /* and SRR1 */
198 mtspr SPRN_SRR1,r10
199 RFI_TO_KERNEL
200 .endif
201 b . /* prevent speculative execution */
202.endm
203
/*
 * EXCEPTION_PROLOG_2_VIRT: finish a relocation-on (virtual mode)
 * prolog.  RI is turned on with mtmsrd (EE stays 0) and control
 * transfers to \label -- via CTR on relocatable kernels, where the
 * target may be out of direct-branch range.  As with the real
 * variant, r11/r12 carry the interrupted (H)SRR0/(H)SRR1.
 */
204.macro EXCEPTION_PROLOG_2_VIRT label, hsrr
205#ifdef CONFIG_RELOCATABLE
206 .if \hsrr
207 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
208 .else
209 mfspr r11,SPRN_SRR0 /* save SRR0 */
210 .endif
211 LOAD_HANDLER(r12, \label\())
212 mtctr r12
213 .if \hsrr
214 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
215 .else
216 mfspr r12,SPRN_SRR1 /* and SRR1 */
217 .endif
218 li r10,MSR_RI
219 mtmsrd r10,1 /* Set RI (EE=0) */
220 bctr
221#else
222 .if \hsrr
223 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
224 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
225 .else
226 mfspr r11,SPRN_SRR0 /* save SRR0 */
227 mfspr r12,SPRN_SRR1 /* and SRR1 */
228 .endif
229 li r10,MSR_RI
230 mtmsrd r10,1 /* Set RI (EE=0) */
231 b \label
232#endif
233.endm
234
235/*
236 * Branch to label using its 0xC000 address. This results in instruction
237 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
238 * on using mtmsr rather than rfid.
239 *
240 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
241 * load KBASE for a slight optimisation.
242 */
243#define BRANCH_TO_C000(reg, label) \
244 __LOAD_HANDLER(reg, label); \
245 mtctr reg; \
246 bctr
247
248#ifdef CONFIG_RELOCATABLE
12a04809
NP
249#define BRANCH_LINK_TO_FAR(label) \
250 __LOAD_FAR_HANDLER(r12, label); \
251 mtctr r12; \
252 bctrl
253
254#else
12a04809
NP
255#define BRANCH_LINK_TO_FAR(label) \
256 bl label
257#endif
258
259#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
12a04809
NP
260#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
261/*
262 * If hv is possible, interrupts come into to the hv version
263 * of the kvmppc_interrupt code, which then jumps to the PR handler,
264 * kvmppc_interrupt_pr, if the guest is a PR guest.
265 */
266#define kvmppc_interrupt kvmppc_interrupt_hv
267#else
268#define kvmppc_interrupt kvmppc_interrupt_pr
269#endif
270
/*
 * KVMTEST: if HSTATE_IN_GUEST is nonzero the interrupt arrived while
 * a KVM guest was running, so branch to the matching do_kvm_(H)<n>
 * trampoline.  Clobbers r10 and cr0.  Expands to nothing when
 * CONFIG_KVM_BOOK3S_64_HANDLER is not set (stub versions below).
 */
271.macro KVMTEST hsrr, n
272 lbz r10,HSTATE_IN_GUEST(r13)
273 cmpwi r10,0
274 .if \hsrr
275 bne do_kvm_H\n
276 .else
277 bne do_kvm_\n
278 .endif
279.endm
280
/*
 * KVM_HANDLER: body of a KVM trampoline.  Saves CFAR/PPR from the
 * PACA save area into the KVM HSTATE area, builds the trap word in
 * r12 (saved CR in the top half, vector number in the bottom, with
 * 0x2 or'ed in for HSRR vectors), and transfers to kvmppc_interrupt.
 * With \skip, a guest in KVM_GUEST_MODE_SKIP instead has r9/r10
 * restored and is sent to the kvmppc_skip_(H)interrupt exit path.
 */
281.macro KVM_HANDLER area, hsrr, n, skip
282 .if \skip
283 cmpwi r10,KVM_GUEST_MODE_SKIP
284 beq 89f
285 .else
bf66e3c4 286BEGIN_FTR_SECTION_NESTED(947)
12a04809
NP
287 ld r10,\area+EX_CFAR(r13)
288 std r10,HSTATE_CFAR(r13)
bf66e3c4 289END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
12a04809
NP
290 .endif
291
bf66e3c4 292BEGIN_FTR_SECTION_NESTED(948)
12a04809
NP
293 ld r10,\area+EX_PPR(r13)
294 std r10,HSTATE_PPR(r13)
bf66e3c4 295END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
12a04809
NP
296 ld r10,\area+EX_R10(r13)
297 std r12,HSTATE_SCRATCH0(r13)
298 sldi r12,r9,32
299 /* HSRR variants have the 0x2 bit added to their trap number */
300 .if \hsrr
301 ori r12,r12,(\n + 0x2)
302 .else
303 ori r12,r12,(\n)
304 .endif
64e41351
NP
305
306#ifdef CONFIG_RELOCATABLE
307 /*
308 * KVM requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
309 * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
310 * to be saved in HSTATE_SCRATCH1.
311 */
312 mfctr r9
313 std r9,HSTATE_SCRATCH1(r13)
314 __LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
315 mtctr r9
316 ld r9,\area+EX_R9(r13)
317 bctr
318#else
319 ld r9,\area+EX_R9(r13)
320 b kvmppc_interrupt
321#endif
322
12a04809
NP
323
324 .if \skip
/* Skip path: restore CR field 0 and r9/r10, then take the skip exit */
32589: mtocrf 0x80,r9
326 ld r9,\area+EX_R9(r13)
327 ld r10,\area+EX_R10(r13)
328 .if \hsrr
329 b kvmppc_skip_Hinterrupt
330 .else
331 b kvmppc_skip_interrupt
332 .endif
333 .endif
334.endm
335
336#else
337.macro KVMTEST hsrr, n
338.endm
339.macro KVM_HANDLER area, hsrr, n, skip
340.endm
341#endif
342
343#define EXCEPTION_PROLOG_COMMON_1() \
344 std r9,_CCR(r1); /* save CR in stackframe */ \
345 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
346 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
347 std r10,0(r1); /* make stack chain pointer */ \
348 std r0,GPR0(r1); /* save r0 in stackframe */ \
349 std r10,GPR1(r1); /* save r1 in stackframe */ \
350
12a04809
NP
351/* Save original regs values from save area to stack frame. */
352#define EXCEPTION_PROLOG_COMMON_2(area) \
353 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
354 ld r10,area+EX_R10(r13); \
355 std r9,GPR9(r1); \
356 std r10,GPR10(r1); \
357 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
358 ld r10,area+EX_R12(r13); \
359 ld r11,area+EX_R13(r13); \
360 std r9,GPR11(r1); \
361 std r10,GPR12(r1); \
362 std r11,GPR13(r1); \
bf66e3c4 363BEGIN_FTR_SECTION_NESTED(66); \
12a04809
NP
364 ld r10,area+EX_CFAR(r13); \
365 std r10,ORIG_GPR3(r1); \
bf66e3c4 366END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
12a04809
NP
367 GET_CTR(r10, area); \
368 std r10,_CTR(r1);
369
d064151f 370#define EXCEPTION_PROLOG_COMMON_3(trap) \
12a04809
NP
371 std r2,GPR2(r1); /* save r2 in stackframe */ \
372 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
373 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
374 mflr r9; /* Get LR, later save to stack */ \
375 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
376 std r9,_LINK(r1); \
377 lbz r10,PACAIRQSOFTMASK(r13); \
378 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
379 std r10,SOFTE(r1); \
380 std r11,_XER(r1); \
d064151f 381 li r9,(trap)+1; \
12a04809
NP
382 std r9,_TRAP(r1); /* set trap number */ \
383 li r10,0; \
384 ld r11,exception_marker@toc(r2); \
385 std r10,RESULT(r1); /* clear regs->result */ \
386 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
387
d064151f
NP
388/*
389 * On entry r13 points to the paca, r9-r13 are saved in the paca,
390 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
391 * SRR1, and relocation is on.
 *
 * Switches to the kernel stack (PACAKSAVE) when coming from userspace,
 * checks that a kernel-mode r1 is not actually a userspace address
 * (falling through to bad_stack if it is, after stashing the trap
 * number and r3 for the report), then builds the pt_regs frame via
 * the EXCEPTION_PROLOG_COMMON_{1,2,3} pieces, locks the AMR (KUAP),
 * and does user/stolen time accounting.
392 */
393#define EXCEPTION_COMMON(area, trap) \
394 andi. r10,r12,MSR_PR; /* See if coming from user */ \
395 mr r10,r1; /* Save r1 */ \
396 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
397 beq- 1f; \
398 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
3991: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
400 blt+ cr1,3f; /* abort if it is */ \
401 li r1,(trap); /* will be reloaded later */ \
402 sth r1,PACA_TRAP_SAVE(r13); \
403 std r3,area+EX_R3(r13); \
404 addi r3,r13,area; /* r3 -> where regs are saved*/ \
405 RESTORE_CTR(r1, area); \
406 b bad_stack; \
4073: EXCEPTION_PROLOG_COMMON_1(); \
408 kuap_save_amr_and_lock r9, r10, cr1, cr0; \
409 beq 4f; /* if from kernel mode */ \
410 ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
411 SAVE_PPR(area, r9); \
4124: EXCEPTION_PROLOG_COMMON_2(area); \
413 EXCEPTION_PROLOG_COMMON_3(trap); \
414 ACCOUNT_STOLEN_TIME
12a04809 415
12a04809
NP
416
417/*
d064151f
NP
418 * Exception where stack is already set in r1, r1 is saved in r10.
419 * PPR save and CPU accounting is not done (for some reason).
12a04809
NP
420 */
421#define EXCEPTION_COMMON_STACK(area, trap) \
422 EXCEPTION_PROLOG_COMMON_1(); \
423 kuap_save_amr_and_lock r9, r10, cr1; \
424 EXCEPTION_PROLOG_COMMON_2(area); \
425 EXCEPTION_PROLOG_COMMON_3(trap)
426
d064151f
NP
427
/*
 * RUNLATCH_ON: if this thread's _TLF_RUNLATCH flag is clear (andi.
 * yields EQ), call ppc64_runlatch_on_trampoline (beql = branch and
 * link if equal) to set the run latch.  Patched out on CPUs without
 * CPU_FTR_CTRL.  Clobbers r0, r3, r4, cr0 and LR on the taken path.
 */
428#define RUNLATCH_ON \
429BEGIN_FTR_SECTION \
430 ld r3, PACA_THREAD_INFO(r13); \
431 ld r4,TI_LOCAL_FLAGS(r3); \
432 andi. r0,r4,_TLF_RUNLATCH; \
433 beql ppc64_runlatch_on_trampoline; \
434END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
435
12a04809
NP
436/*
437 * When the idle code in power4_idle puts the CPU into NAP mode,
438 * it has to do so in a loop, and relies on the external interrupt
439 * and decrementer interrupt entry code to get it out of the loop.
440 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
441 * to signal that it is in the loop and needs help to get out.
442 */
443#ifdef CONFIG_PPC_970_NAP
444#define FINISH_NAP \
445BEGIN_FTR_SECTION \
446 ld r11, PACA_THREAD_INFO(r13); \
447 ld r9,TI_LOCAL_FLAGS(r11); \
448 andi. r10,r9,_TLF_NAPPING; \
449 bnel power4_fixup_nap; \
450END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
451#else
452#define FINISH_NAP
453#endif
454
a0502434
NP
455/*
456 * Following are the BOOK3S exception handler helper macros.
457 * Handlers come in a number of types, and each type has a number of varieties.
458 *
459 * EXC_REAL_* - real, unrelocated exception vectors
460 * EXC_VIRT_* - virt (AIL), unrelocated exception vectors
461 * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
462 * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
463 * TRAMP_KVM - KVM handlers that get put into real, unrelocated
464 * EXC_COMMON - virt, relocated common handlers
465 *
466 * The EXC handlers are given a name, and branch to name_common, or the
467 * appropriate KVM or masking function. Vector handler varieties are as
468 * follows:
469 *
470 * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
471 *
472 * EXC_{REAL|VIRT} - standard exception
473 *
474 * EXC_{REAL|VIRT}_suffix
475 * where _suffix is:
476 * - _MASKABLE - maskable exception
477 * - _OOL - out of line with trampoline to common handler
478 * - _HV - HV exception
479 *
480 * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
481 *
482 * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
483 * an OOL vector that branches to a specified handler rather than the usual
484 * trampoline that goes to common. It, and other underscore macros, should
485 * be used with care.
486 *
487 * KVM handlers come in the following varieties:
488 * TRAMP_KVM
489 * TRAMP_KVM_SKIP
490 * TRAMP_KVM_HV
491 * TRAMP_KVM_HV_SKIP
492 *
493 * COMMON handlers come in the following varieties:
494 * EXC_COMMON_BEGIN/END - used to open-code the handler
495 * EXC_COMMON
496 * EXC_COMMON_ASYNC
497 *
498 * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
499 * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
500 */
501
502#define __EXC_REAL(name, start, size, area) \
503 EXC_REAL_BEGIN(name, start, size); \
504 SET_SCRATCH0(r13); /* save r13 */ \
505 EXCEPTION_PROLOG_0 area ; \
506 EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0 ; \
507 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
508 EXC_REAL_END(name, start, size)
509
510#define EXC_REAL(name, start, size) \
511 __EXC_REAL(name, start, size, PACA_EXGEN)
512
513#define __EXC_VIRT(name, start, size, realvec, area) \
514 EXC_VIRT_BEGIN(name, start, size); \
515 SET_SCRATCH0(r13); /* save r13 */ \
516 EXCEPTION_PROLOG_0 area ; \
517 EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0; \
518 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
519 EXC_VIRT_END(name, start, size)
520
521#define EXC_VIRT(name, start, size, realvec) \
522 __EXC_VIRT(name, start, size, realvec, PACA_EXGEN)
523
524#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
525 EXC_REAL_BEGIN(name, start, size); \
526 SET_SCRATCH0(r13); /* save r13 */ \
527 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
528 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, bitmask ; \
529 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
530 EXC_REAL_END(name, start, size)
531
532#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
533 EXC_VIRT_BEGIN(name, start, size); \
534 SET_SCRATCH0(r13); /* save r13 */ \
535 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
536 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, bitmask ; \
537 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
538 EXC_VIRT_END(name, start, size)
539
540#define EXC_REAL_HV(name, start, size) \
541 EXC_REAL_BEGIN(name, start, size); \
542 SET_SCRATCH0(r13); /* save r13 */ \
543 EXCEPTION_PROLOG_0 PACA_EXGEN; \
544 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0 ; \
545 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ; \
546 EXC_REAL_END(name, start, size)
547
548#define EXC_VIRT_HV(name, start, size, realvec) \
549 EXC_VIRT_BEGIN(name, start, size); \
550 SET_SCRATCH0(r13); /* save r13 */ \
551 EXCEPTION_PROLOG_0 PACA_EXGEN; \
552 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0 ; \
553 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ; \
554 EXC_VIRT_END(name, start, size)
555
556#define __EXC_REAL_OOL(name, start, size) \
557 EXC_REAL_BEGIN(name, start, size); \
558 SET_SCRATCH0(r13); \
559 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
560 b tramp_real_##name ; \
561 EXC_REAL_END(name, start, size)
562
563#define __TRAMP_REAL_OOL(name, vec) \
564 TRAMP_REAL_BEGIN(tramp_real_##name); \
565 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0 ; \
566 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
567
568#define EXC_REAL_OOL(name, start, size) \
569 __EXC_REAL_OOL(name, start, size); \
570 __TRAMP_REAL_OOL(name, start)
571
572#define __EXC_REAL_OOL_MASKABLE(name, start, size) \
573 __EXC_REAL_OOL(name, start, size)
574
575#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
576 TRAMP_REAL_BEGIN(tramp_real_##name); \
577 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, bitmask ; \
578 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
579
580#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
581 __EXC_REAL_OOL_MASKABLE(name, start, size); \
582 __TRAMP_REAL_OOL_MASKABLE(name, start, bitmask)
583
584#define __EXC_REAL_OOL_HV_DIRECT(name, start, size, handler) \
585 EXC_REAL_BEGIN(name, start, size); \
586 SET_SCRATCH0(r13); \
587 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
588 b handler; \
589 EXC_REAL_END(name, start, size)
590
591#define __EXC_REAL_OOL_HV(name, start, size) \
592 __EXC_REAL_OOL(name, start, size)
593
594#define __TRAMP_REAL_OOL_HV(name, vec) \
595 TRAMP_REAL_BEGIN(tramp_real_##name); \
596 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0 ; \
597 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
598
599#define EXC_REAL_OOL_HV(name, start, size) \
600 __EXC_REAL_OOL_HV(name, start, size); \
601 __TRAMP_REAL_OOL_HV(name, start)
602
603#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \
604 __EXC_REAL_OOL(name, start, size)
605
606#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \
607 TRAMP_REAL_BEGIN(tramp_real_##name); \
608 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, bitmask ; \
609 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
610
611#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \
612 __EXC_REAL_OOL_MASKABLE_HV(name, start, size); \
613 __TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask)
614
615#define __EXC_VIRT_OOL(name, start, size) \
616 EXC_VIRT_BEGIN(name, start, size); \
617 SET_SCRATCH0(r13); \
618 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
619 b tramp_virt_##name; \
620 EXC_VIRT_END(name, start, size)
621
/*
 * Out-of-line virt trampoline: EXCEPTION_PROLOG_1 with kvm=0 and
 * bitmask=0, then transfer to the common handler.
 *
 * Fix: pass the macro's realvec argument through to EXCEPTION_PROLOG_1
 * rather than the stray token "vec".  This went unnoticed only because
 * with kvm=0 and bitmask=0 the \vec argument is never expanded; the
 * expansion is identical, but the stray name was a latent bug and is
 * inconsistent with every sibling trampoline macro.
 */
622#define __TRAMP_VIRT_OOL(name, realvec) \
623 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
624 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0 ; \
625 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD
626
627#define EXC_VIRT_OOL(name, start, size, realvec) \
628 __EXC_VIRT_OOL(name, start, size); \
629 __TRAMP_VIRT_OOL(name, realvec)
630
631#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \
632 __EXC_VIRT_OOL(name, start, size)
633
634#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
635 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
636 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, bitmask ; \
637 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
638
639#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
640 __EXC_VIRT_OOL_MASKABLE(name, start, size); \
641 __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)
642
643#define __EXC_VIRT_OOL_HV(name, start, size) \
644 __EXC_VIRT_OOL(name, start, size)
645
646#define __TRAMP_VIRT_OOL_HV(name, realvec) \
647 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
648 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0 ; \
649 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
650
651#define EXC_VIRT_OOL_HV(name, start, size, realvec) \
652 __EXC_VIRT_OOL_HV(name, start, size); \
653 __TRAMP_VIRT_OOL_HV(name, realvec)
654
655#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \
656 __EXC_VIRT_OOL(name, start, size)
657
658#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \
659 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
660 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, bitmask ; \
661 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
662
663#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
664 __EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \
665 __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)
666
667#define TRAMP_KVM(area, n) \
668 TRAMP_KVM_BEGIN(do_kvm_##n); \
669 KVM_HANDLER area, EXC_STD, n, 0
670
671#define TRAMP_KVM_SKIP(area, n) \
672 TRAMP_KVM_BEGIN(do_kvm_##n); \
673 KVM_HANDLER area, EXC_STD, n, 1
674
675#define TRAMP_KVM_HV(area, n) \
676 TRAMP_KVM_BEGIN(do_kvm_H##n); \
677 KVM_HANDLER area, EXC_HV, n, 0
678
679#define TRAMP_KVM_HV_SKIP(area, n) \
680 TRAMP_KVM_BEGIN(do_kvm_H##n); \
681 KVM_HANDLER area, EXC_HV, n, 1
682
683#define EXC_COMMON(name, realvec, hdlr) \
684 EXC_COMMON_BEGIN(name); \
685 EXCEPTION_COMMON(PACA_EXGEN, realvec); \
686 bl save_nvgprs; \
687 RECONCILE_IRQ_STATE(r10, r11); \
688 addi r3,r1,STACK_FRAME_OVERHEAD; \
689 bl hdlr; \
690 b ret_from_except
691
692/*
693 * Like EXC_COMMON, but for exceptions that can occur in the idle task and
694 * therefore need the special idle handling (finish nap and runlatch)
695 */
696#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
697 EXC_COMMON_BEGIN(name); \
698 EXCEPTION_COMMON(PACA_EXGEN, realvec); \
699 FINISH_NAP; \
700 RECONCILE_IRQ_STATE(r10, r11); \
701 RUNLATCH_ON; \
702 addi r3,r1,STACK_FRAME_OVERHEAD; \
703 bl hdlr; \
704 b ret_from_except_lite
705
12a04809 706
0ebc4cda 707/*
57f26649
NP
708 * There are a few constraints to be concerned with.
709 * - Real mode exceptions code/data must be located at their physical location.
710 * - Virtual mode exceptions must be mapped at their 0xc000... location.
711 * - Fixed location code must not call directly beyond the __end_interrupts
712 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
713 * must be used.
714 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
715 * virtual 0xc00...
716 * - Conditional branch targets must be within +/-32K of caller.
717 *
718 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
719 * therefore don't have to run in physically located code or rfid to
720 * virtual mode kernel code. However on relocatable kernels they do have
721 * to branch to KERNELBASE offset because the rest of the kernel (outside
722 * the exception vectors) may be located elsewhere.
723 *
724 * Virtual exceptions correspond with physical, except their entry points
725 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
726 * offset applied. Virtual exceptions are enabled with the Alternate
727 * Interrupt Location (AIL) bit set in the LPCR. However this does not
728 * guarantee they will be delivered virtually. Some conditions (see the ISA)
729 * cause exceptions to be delivered in real mode.
730 *
731 * It's impossible to receive interrupts below 0x300 via AIL.
732 *
733 * KVM: None of the virtual exceptions are from the guest. Anything that
734 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
735 *
736 *
0ebc4cda
BH
737 * We layout physical memory as follows:
738 * 0x0000 - 0x00ff : Secondary processor spin code
57f26649
NP
739 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
740 * 0x1900 - 0x3fff : Real mode trampolines
741 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
742 * 0x5900 - 0x6fff : Relon mode trampolines
0ebc4cda 743 * 0x7000 - 0x7fff : FWNMI data area
57f26649
NP
744 * 0x8000 - .... : Common interrupt handlers, remaining early
745 * setup code, rest of kernel.
e0319829
NP
746 *
747 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
748 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
749 * vectors there.
57f26649
NP
750 */
751OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
752OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
753OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
754OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
ccd47702
NP
755
756#ifdef CONFIG_PPC_POWERNV
bd3524fe
NP
757 .globl start_real_trampolines
758 .globl end_real_trampolines
759 .globl start_virt_trampolines
760 .globl end_virt_trampolines
ccd47702
NP
761#endif
762
57f26649
NP
763#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
764/*
765 * Data area reserved for FWNMI option.
766 * This address (0x7000) is fixed by the RPA.
767 * pseries and powernv need to keep the whole page from
768 * 0x7000 to 0x8000 free for use by the firmware
0ebc4cda 769 */
57f26649
NP
770ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000)
771OPEN_TEXT_SECTION(0x8000)
772#else
773OPEN_TEXT_SECTION(0x7000)
774#endif
775
776USE_FIXED_SECTION(real_vectors)
777
0ebc4cda
BH
778/*
779 * This is the start of the interrupt handlers for pSeries
780 * This code runs with relocation off.
781 * Code from here to __end_interrupts gets copied down to real
782 * address 0x100 when we are running a relocatable kernel.
783 * Therefore any relative branches in this section must only
784 * branch to labels in this section.
785 */
0ebc4cda
BH
786 .globl __start_interrupts
787__start_interrupts:
788
e0319829 789/* No virt vectors corresponding with 0x0..0x100 */
1a6822d1 790EXC_VIRT_NONE(0x4000, 0x100)
e0319829 791
fb479e44 792
a7c1ca19
NP
793EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
794 SET_SCRATCH0(r13)
5dba1d50 795 EXCEPTION_PROLOG_0 PACA_EXNMI
a7c1ca19
NP
796
797 /* This is EXCEPTION_PROLOG_1 with the idle feature section added */
798 OPT_SAVE_REG_TO_PACA(PACA_EXNMI+EX_PPR, r9, CPU_FTR_HAS_PPR)
799 OPT_SAVE_REG_TO_PACA(PACA_EXNMI+EX_CFAR, r10, CPU_FTR_CFAR)
800 INTERRUPT_TO_KERNEL
801 SAVE_CTR(r10, PACA_EXNMI)
802 mfcr r9
803
948cf67c 804#ifdef CONFIG_PPC_P7_NAP
fb479e44
NP
805 /*
806 * If running native on arch 2.06 or later, check if we are waking up
ba6d334a
BH
807 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
808 * bits 46:47. A non-0 value indicates that we are coming from a power
809 * saving state. The idle wakeup handler initially runs in real mode,
810 * but we branch to the 0xc000... address so we can turn on relocation
811 * with mtmsr.
948cf67c 812 */
bf66e3c4 813BEGIN_FTR_SECTION
a7c1ca19
NP
814 mfspr r10,SPRN_SRR1
815 rlwinm. r10,r10,47-31,30,31
816 beq- 1f
817 cmpwi cr1,r10,2
818 mfspr r3,SPRN_SRR1
819 bltlr cr1 /* no state loss, return to idle caller */
820 BRANCH_TO_C000(r10, system_reset_idle_common)
8211:
bf66e3c4 822END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
fb479e44 823#endif
371fefd6 824
a7c1ca19
NP
825 KVMTEST EXC_STD 0x100
826 std r11,PACA_EXNMI+EX_R11(r13)
827 std r12,PACA_EXNMI+EX_R12(r13)
828 GET_SCRATCH0(r10)
829 std r10,PACA_EXNMI+EX_R13(r13)
830
831 EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
c4f3b52c
NP
832 /*
833 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
834 * being used, so a nested NMI exception would corrupt it.
835 */
fb479e44 836
1a6822d1
NP
837EXC_REAL_END(system_reset, 0x100, 0x100)
838EXC_VIRT_NONE(0x4100, 0x100)
6de6638b 839TRAMP_KVM(PACA_EXNMI, 0x100)
fb479e44
NP
840
841#ifdef CONFIG_PPC_P7_NAP
842EXC_COMMON_BEGIN(system_reset_idle_common)
10d91611
NP
843 /*
844 * This must be a direct branch (without linker branch stub) because
845 * we can not use TOC at this point as r2 may not be restored yet.
846 */
847 b idle_return_gpr_loss
371fefd6
PM
848#endif
849
a3d96f70 850EXC_COMMON_BEGIN(system_reset_common)
c4f3b52c
NP
851 /*
852 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
853 * to recover, but nested NMI will notice in_nmi and not recover
854 * because of the use of the NMI stack. in_nmi reentrancy is tested in
855 * system_reset_exception.
856 */
857 lhz r10,PACA_IN_NMI(r13)
858 addi r10,r10,1
859 sth r10,PACA_IN_NMI(r13)
860 li r10,MSR_RI
861 mtmsrd r10,1
aca79d2b 862
b1ee8a3d
NP
863 mr r10,r1
864 ld r1,PACA_NMI_EMERG_SP(r13)
865 subi r1,r1,INT_FRAME_SIZE
47169fba
NP
866 EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100)
867 bl save_nvgprs
868 /*
869 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
870 * the right thing. We do not want to reconcile because that goes
871 * through irq tracing which we don't want in NMI.
872 *
873 * Save PACAIRQHAPPENED because some code will do a hard disable
874 * (e.g., xmon). So we want to restore this back to where it was
875 * when we return. DAR is unused in the stack, so save it there.
876 */
877 li r10,IRQS_ALL_DISABLED
878 stb r10,PACAIRQSOFTMASK(r13)
879 lbz r10,PACAIRQHAPPENED(r13)
880 std r10,_DAR(r1)
881
c06075f3
NP
882 addi r3,r1,STACK_FRAME_OVERHEAD
883 bl system_reset_exception
15b4dd79
NP
884
885 /* This (and MCE) can be simplified with mtmsrd L=1 */
886 /* Clear MSR_RI before setting SRR0 and SRR1. */
887 li r0,MSR_RI
888 mfmsr r9
889 andc r9,r9,r0
890 mtmsrd r9,1
c4f3b52c
NP
891
892 /*
15b4dd79 893 * MSR_RI is clear, now we can decrement paca->in_nmi.
c4f3b52c
NP
894 */
895 lhz r10,PACA_IN_NMI(r13)
896 subi r10,r10,1
897 sth r10,PACA_IN_NMI(r13)
898
15b4dd79
NP
899 /*
900 * Restore soft mask settings.
901 */
902 ld r10,_DAR(r1)
903 stb r10,PACAIRQHAPPENED(r13)
904 ld r10,SOFTE(r1)
905 stb r10,PACAIRQSOFTMASK(r13)
906
907 /*
908 * Keep below code in synch with MACHINE_CHECK_HANDLER_WINDUP.
909 * Should share common bits...
910 */
911
912 /* Move original SRR0 and SRR1 into the respective regs */
913 ld r9,_MSR(r1)
914 mtspr SPRN_SRR1,r9
915 ld r3,_NIP(r1)
916 mtspr SPRN_SRR0,r3
917 ld r9,_CTR(r1)
918 mtctr r9
919 ld r9,_XER(r1)
920 mtxer r9
921 ld r9,_LINK(r1)
922 mtlr r9
923 REST_GPR(0, r1)
924 REST_8GPRS(2, r1)
925 REST_GPR(10, r1)
926 ld r11,_CCR(r1)
927 mtcr r11
928 REST_GPR(11, r1)
929 REST_2GPRS(12, r1)
930 /* restore original r1. */
931 ld r1,GPR1(r1)
932 RFI_TO_USER_OR_KERNEL
582baf44
NP
933
934#ifdef CONFIG_PPC_PSERIES
935/*
936 * Vectors for the FWNMI option. Share common code.
937 */
938TRAMP_REAL_BEGIN(system_reset_fwnmi)
939 SET_SCRATCH0(r13) /* save r13 */
fc557537
NP
940 /* See comment at system_reset exception, don't turn on RI */
941 EXCEPTION_PROLOG_0 PACA_EXNMI
942 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0
943 EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
944
582baf44
NP
945#endif /* CONFIG_PPC_PSERIES */
946
0ebc4cda 947
1a6822d1 948EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
b01c8b54
PM
949 /* This is moved out of line as it can be patched by FW, but
950 * some code path might still want to branch into the original
951 * vector
952 */
1707dd16 953 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 954 EXCEPTION_PROLOG_0 PACA_EXMC
1e9b4507 955BEGIN_FTR_SECTION
db7d31ac 956 b machine_check_common_early
1e9b4507 957FTR_SECTION_ELSE
1707dd16 958 b machine_check_pSeries_0
1e9b4507 959ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1a6822d1
NP
960EXC_REAL_END(machine_check, 0x200, 0x100)
961EXC_VIRT_NONE(0x4200, 0x100)
db7d31ac 962TRAMP_REAL_BEGIN(machine_check_common_early)
fa4cf6b7 963 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0
afcf0095
NP
964 /*
965 * Register contents:
966 * R13 = PACA
967 * R9 = CR
968 * Original R9 to R13 is saved on PACA_EXMC
969 *
970 * Switch to mc_emergency stack and handle re-entrancy (we limit
971 * the nested MCE upto level 4 to avoid stack overflow).
972 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
973 *
974 * We use paca->in_mce to check whether this is the first entry or
975 * nested machine check. We increment paca->in_mce to track nested
976 * machine checks.
977 *
978 * If this is the first entry then set stack pointer to
979 * paca->mc_emergency_sp, otherwise r1 is already pointing to
980 * stack frame on mc_emergency stack.
981 *
982 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
983 * checkstop if we get another machine check exception before we do
984 * rfid with MSR_ME=1.
1945bc45
NP
985 *
986 * This interrupt can wake directly from idle. If that is the case,
987 * the machine check is handled then the idle wakeup code is called
2bf1071a 988 * to restore state.
afcf0095
NP
989 */
990 mr r11,r1 /* Save r1 */
991 lhz r10,PACA_IN_MCE(r13)
992 cmpwi r10,0 /* Are we in nested machine check */
993 bne 0f /* Yes, we are. */
994 /* First machine check entry */
995 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
9960: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
997 addi r10,r10,1 /* increment paca->in_mce */
998 sth r10,PACA_IN_MCE(r13)
999 /* Limit nested MCE to level 4 to avoid stack overflow */
ba41e1e1 1000 cmpwi r10,MAX_MCE_DEPTH
afcf0095
NP
1001 bgt 2f /* Check if we hit limit of 4 */
1002 std r11,GPR1(r1) /* Save r1 on the stack. */
1003 std r11,0(r1) /* make stack chain pointer */
1004 mfspr r11,SPRN_SRR0 /* Save SRR0 */
1005 std r11,_NIP(r1)
1006 mfspr r11,SPRN_SRR1 /* Save SRR1 */
1007 std r11,_MSR(r1)
1008 mfspr r11,SPRN_DAR /* Save DAR */
1009 std r11,_DAR(r1)
1010 mfspr r11,SPRN_DSISR /* Save DSISR */
1011 std r11,_DSISR(r1)
1012 std r9,_CCR(r1) /* Save CR in stackframe */
e13e7cd4 1013 /* We don't touch AMR here, we never go to virtual mode */
afcf0095
NP
1014 /* Save r9 through r13 from EXMC save area to stack frame. */
1015 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
1016 mfmsr r11 /* get MSR value */
db7d31ac 1017BEGIN_FTR_SECTION
afcf0095 1018 ori r11,r11,MSR_ME /* turn on ME bit */
db7d31ac 1019END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
afcf0095
NP
1020 ori r11,r11,MSR_RI /* turn on RI bit */
1021 LOAD_HANDLER(r12, machine_check_handle_early)
10221: mtspr SPRN_SRR0,r12
1023 mtspr SPRN_SRR1,r11
222f20f1 1024 RFI_TO_KERNEL
afcf0095
NP
1025 b . /* prevent speculative execution */
10262:
1027 /* Stack overflow. Stay on emergency stack and panic.
1028 * Keep the ME bit off while panic-ing, so that if we hit
1029 * another machine check we checkstop.
1030 */
1031 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
1032 ld r11,PACAKMSR(r13)
1033 LOAD_HANDLER(r12, unrecover_mce)
1034 li r10,MSR_ME
1035 andc r11,r11,r10 /* Turn off MSR_ME */
1036 b 1b
1037 b . /* prevent speculative execution */
afcf0095
NP
1038
1039TRAMP_REAL_BEGIN(machine_check_pSeries)
1040 .globl machine_check_fwnmi
1041machine_check_fwnmi:
1042 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1043 EXCEPTION_PROLOG_0 PACA_EXMC
a43c1590 1044BEGIN_FTR_SECTION
db7d31ac 1045 b machine_check_common_early
a43c1590 1046END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
afcf0095 1047machine_check_pSeries_0:
fa4cf6b7 1048 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 0
afcf0095 1049 /*
83a980f7
NP
1050 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
1051 * nested machine check corrupts it. machine_check_common enables
1052 * MSR_RI.
afcf0095 1053 */
2d046308 1054 EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
afcf0095
NP
1055
1056TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
1057
/*
 * NOTE(review): this SOURCE is a git-blame scrape; the leading numbers and
 * bare commit-hash/author lines are extraction artifacts, not code. They are
 * preserved byte-for-byte below; only comments are added.
 */
 1058EXC_COMMON_BEGIN(machine_check_common)
 1059	/*
 1060	 * Machine check is different because we use a different
 1061	 * save area: PACA_EXMC instead of PACA_EXGEN.
 1062	 */
	/*
	 * Capture DAR/DSISR into the EXMC save area first: they are live
	 * hardware state and must be read before anything can clobber them.
	 * They are copied into the pt_regs frame further down.
	 */
 1063	mfspr	r10,SPRN_DAR
 1064	std	r10,PACA_EXMC+EX_DAR(r13)
 1065	mfspr	r10,SPRN_DSISR
 1066	stw	r10,PACA_EXMC+EX_DSISR(r13)
d064151f 1067	EXCEPTION_COMMON(PACA_EXMC, 0x200)
afcf0095
NP
 1068	FINISH_NAP
 1069	RECONCILE_IRQ_STATE(r10, r11)
 1070	ld	r3,PACA_EXMC+EX_DAR(r13)
 1071	lwz	r4,PACA_EXMC+EX_DSISR(r13)
 1072	/* Enable MSR_RI when finished with PACA_EXMC */
	/*
	 * RI is only turned on once r3/r4 hold the saved DAR/DSISR: a nested
	 * machine check before this point would overwrite PACA_EXMC, so the
	 * interrupt is deliberately marked non-recoverable until here.
	 */
 1073	li	r10,MSR_RI
 1074	mtmsrd	r10,1
 1075	std	r3,_DAR(r1)
 1076	std	r4,_DSISR(r1)
	/*
	 * Hand off to the C handler with r3 = pointer to the pt_regs frame,
	 * then take the common interrupt-return path.
	 */
 1077	bl	save_nvgprs
 1078	addi	r3,r1,STACK_FRAME_OVERHEAD
 1079	bl	machine_check_exception
 1080	b	ret_from_except
1081
/*
 * NOTE(review): windup for the machine check handler. Clears MSR_RI, moves
 * the saved NIP/MSR back into SRR0/SRR1, restores CTR/XER/LR/CR and the
 * GPRs from the stack frame, decrements paca->in_mce (done after RI is
 * clear, while still on the known-good frame), and finally restores the
 * original r1. The system-reset return path earlier in this file asks to be
 * kept in sync with this sequence. No comment lines are inserted inside the
 * macro body: each line ends in a backslash continuation and an interior
 * comment would terminate the #define early. (Blame-scrape artifacts on
 * each line are preserved byte-for-byte.)
 */
 1082#define MACHINE_CHECK_HANDLER_WINDUP			\
 1083	/* Clear MSR_RI before setting SRR0 and SRR1. */\
 1084	li	r0,MSR_RI;				\
 1085	mfmsr	r9;		/* get MSR value */	\
 1086	andc	r9,r9,r0;				\
 1087	mtmsrd	r9,1;		/* Clear MSR_RI */	\
 1088	/* Move original SRR0 and SRR1 into the respective regs */	\
 1089	ld	r9,_MSR(r1);				\
 1090	mtspr	SPRN_SRR1,r9;				\
 1091	ld	r3,_NIP(r1);				\
 1092	mtspr	SPRN_SRR0,r3;				\
 1093	ld	r9,_CTR(r1);				\
 1094	mtctr	r9;					\
 1095	ld	r9,_XER(r1);				\
 1096	mtxer	r9;					\
 1097	ld	r9,_LINK(r1);				\
 1098	mtlr	r9;					\
 1099	REST_GPR(0, r1);				\
 1100	REST_8GPRS(2, r1);				\
 1101	REST_GPR(10, r1);				\
 1102	ld	r11,_CCR(r1);				\
 1103	mtcr	r11;					\
 1104	/* Decrement paca->in_mce. */			\
 1105	lhz	r12,PACA_IN_MCE(r13);			\
 1106	subi	r12,r12,1;				\
 1107	sth	r12,PACA_IN_MCE(r13);			\
 1108	REST_GPR(11, r1);				\
 1109	REST_2GPRS(12, r1);				\
 1110	/* restore original r1. */			\
 1111	ld	r1,GPR1(r1)
 1112
1945bc45
NP
1112
1945bc45
NP
1113#ifdef CONFIG_PPC_P7_NAP
1114/*
1115 * This is an idle wakeup. Low level machine check has already been
1116 * done. Queue the event then call the idle code to do the wake up.
1117 */
1118EXC_COMMON_BEGIN(machine_check_idle_common)
1119 bl machine_check_queue_event
1120
1121 /*
1122 * We have not used any non-volatile GPRs here, and as a rule
1123 * most exception code including machine check does not.
1124 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
1125 * wakeup will restore volatile registers.
1126 *
1127 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
1128 *
1129 * Then decrement MCE nesting after finishing with the stack.
1130 */
1131 ld r3,_MSR(r1)
10d91611 1132 ld r4,_LINK(r1)
1945bc45
NP
1133
1134 lhz r11,PACA_IN_MCE(r13)
1135 subi r11,r11,1
1136 sth r11,PACA_IN_MCE(r13)
1137
10d91611
NP
1138 mtlr r4
1139 rlwinm r10,r3,47-31,30,31
1140 cmpwi cr1,r10,2
1141 bltlr cr1 /* no state loss, return to idle caller */
1142 b idle_return_gpr_loss
1945bc45 1143#endif
afcf0095
NP
1144 /*
1145 * Handle machine check early in real mode. We come here with
1146 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1147 */
1148EXC_COMMON_BEGIN(machine_check_handle_early)
1149 std r0,GPR0(r1) /* Save r0 */
1150 EXCEPTION_PROLOG_COMMON_3(0x200)
1151 bl save_nvgprs
1152 addi r3,r1,STACK_FRAME_OVERHEAD
1153 bl machine_check_early
1154 std r3,RESULT(r1) /* Save result */
1155 ld r12,_MSR(r1)
db7d31ac
MS
1156BEGIN_FTR_SECTION
1157 b 4f
1158END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
1945bc45 1159
afcf0095
NP
1160#ifdef CONFIG_PPC_P7_NAP
1161 /*
1162 * Check if thread was in power saving mode. We come here when any
1163 * of the following is true:
1164 * a. thread wasn't in power saving mode
1165 * b. thread was in power saving mode with no state loss,
1166 * supervisor state loss or hypervisor state loss.
1167 *
1168 * Go back to nap/sleep/winkle mode again if (b) is true.
1169 */
bf66e3c4 1170BEGIN_FTR_SECTION
1945bc45 1171 rlwinm. r11,r12,47-31,30,31
6102c005 1172 bne machine_check_idle_common
bf66e3c4 1173END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
afcf0095 1174#endif
1945bc45 1175
afcf0095
NP
1176 /*
1177 * Check if we are coming from hypervisor userspace. If yes then we
1178 * continue in host kernel in V mode to deliver the MC event.
1179 */
1180 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
1181 beq 5f
db7d31ac 11824: andi. r11,r12,MSR_PR /* See if coming from user. */
afcf0095
NP
1183 bne 9f /* continue in V mode if we are. */
1184
11855:
1186#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
db7d31ac 1187BEGIN_FTR_SECTION
afcf0095
NP
1188 /*
1189 * We are coming from kernel context. Check if we are coming from
1190 * guest. if yes, then we can continue. We will fall through
1191 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
1192 */
1193 lbz r11,HSTATE_IN_GUEST(r13)
1194 cmpwi r11,0 /* Check if coming from guest */
1195 bne 9f /* continue if we are. */
db7d31ac 1196END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
afcf0095
NP
1197#endif
1198 /*
1199 * At this point we are not sure about what context we come from.
1200 * Queue up the MCE event and return from the interrupt.
1201 * But before that, check if this is an un-recoverable exception.
1202 * If yes, then stay on emergency stack and panic.
1203 */
1204 andi. r11,r12,MSR_RI
1205 bne 2f
12061: mfspr r11,SPRN_SRR0
1207 LOAD_HANDLER(r10,unrecover_mce)
1208 mtspr SPRN_SRR0,r10
1209 ld r10,PACAKMSR(r13)
1210 /*
1211 * We are going down. But there are chances that we might get hit by
1212 * another MCE during panic path and we may run into unstable state
1213 * with no way out. Hence, turn ME bit off while going down, so that
1214 * when another MCE is hit during panic path, system will checkstop
1215 * and hypervisor will get restarted cleanly by SP.
1216 */
1217 li r3,MSR_ME
1218 andc r10,r10,r3 /* Turn off MSR_ME */
1219 mtspr SPRN_SRR1,r10
222f20f1 1220 RFI_TO_KERNEL
afcf0095
NP
1221 b .
12222:
1223 /*
1224 * Check if we have successfully handled/recovered from error, if not
1225 * then stay on emergency stack and panic.
1226 */
1227 ld r3,RESULT(r1) /* Load result */
1228 cmpdi r3,0 /* see if we handled MCE successfully */
1229
1230 beq 1b /* if !handled then panic */
db7d31ac 1231BEGIN_FTR_SECTION
afcf0095
NP
1232 /*
1233 * Return from MC interrupt.
1234 * Queue up the MCE event so that we can log it later, while
1235 * returning from kernel or opal call.
1236 */
1237 bl machine_check_queue_event
1238 MACHINE_CHECK_HANDLER_WINDUP
222f20f1 1239 RFI_TO_USER_OR_KERNEL
db7d31ac
MS
1240FTR_SECTION_ELSE
1241 /*
1242 * pSeries: Return from MC interrupt. Before that stay on emergency
1243 * stack and call machine_check_exception to log the MCE event.
1244 */
1245 LOAD_HANDLER(r10,mce_return)
1246 mtspr SPRN_SRR0,r10
1247 ld r10,PACAKMSR(r13)
1248 mtspr SPRN_SRR1,r10
1249 RFI_TO_KERNEL
1250 b .
1251ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
afcf0095
NP
12529:
1253 /* Deliver the machine check to host kernel in V mode. */
1254 MACHINE_CHECK_HANDLER_WINDUP
db7d31ac 1255 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1256 EXCEPTION_PROLOG_0 PACA_EXMC
db7d31ac 1257 b machine_check_pSeries_0
afcf0095
NP
1258
/*
 * NOTE(review): terminal path for an unrecoverable machine check. Reached
 * (via LOAD_HANDLER earlier in this file) with MSR_ME deliberately off, so
 * a further MCE checkstops instead of recursing. (Blame-scrape artifacts
 * are preserved byte-for-byte; only comments added.)
 */
 1259EXC_COMMON_BEGIN(unrecover_mce)
 1260	/* Invoke machine_check_exception to print MCE event and panic. */
 1261	addi	r3,r1,STACK_FRAME_OVERHEAD
 1262	bl	machine_check_exception
 1263	/*
 1264	 * We will not reach here. Even if we did, there is no way out. Call
 1265	 * unrecoverable_exception and die.
 1266	 */
	/* Loop forever: re-invoke unrecoverable_exception if it ever returns. */
 12671:	addi	r3,r1,STACK_FRAME_OVERHEAD
 1268	bl	unrecoverable_exception
 1269	b	1b
1270
a43c1590
MS
/*
 * NOTE(review): pSeries machine-check return path. Logs the event from the
 * emergency stack via the C handler, then unwinds the MCE frame with
 * MACHINE_CHECK_HANDLER_WINDUP and returns with RFI_TO_KERNEL. (Blame-
 * scrape artifacts preserved byte-for-byte; only comments added.)
 */
 1271EXC_COMMON_BEGIN(mce_return)
 1272	/* Invoke machine_check_exception to print MCE event and return. */
 1273	addi	r3,r1,STACK_FRAME_OVERHEAD
 1274	bl	machine_check_exception
db7d31ac 1275	MACHINE_CHECK_HANDLER_WINDUP
a43c1590
MS
 1276	RFI_TO_KERNEL
 1277	b	.
0ebc4cda 1278
e779fc93 1279EXC_REAL_BEGIN(data_access, 0x300, 0x80)
bf66e3c4
NP
1280 SET_SCRATCH0(r13) /* save r13 */
1281 EXCEPTION_PROLOG_0 PACA_EXGEN
e779fc93
NP
1282 b tramp_real_data_access
1283EXC_REAL_END(data_access, 0x300, 0x80)
1284
1285TRAMP_REAL_BEGIN(tramp_real_data_access)
bf66e3c4 1286 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 0
38555434
NP
1287 /*
1288 * DAR/DSISR must be read before setting MSR[RI], because
1289 * a d-side MCE will clobber those registers so is not
1290 * recoverable if they are live.
1291 */
1292 mfspr r10,SPRN_DAR
1293 mfspr r11,SPRN_DSISR
1294 std r10,PACA_EXGEN+EX_DAR(r13)
1295 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1296EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1
e779fc93
NP
1297
1298EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
bf66e3c4
NP
1299 SET_SCRATCH0(r13) /* save r13 */
1300 EXCEPTION_PROLOG_0 PACA_EXGEN
1301 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 0
38555434
NP
1302 mfspr r10,SPRN_DAR
1303 mfspr r11,SPRN_DSISR
1304 std r10,PACA_EXGEN+EX_DAR(r13)
1305 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1306EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD
e779fc93
NP
1307EXC_VIRT_END(data_access, 0x4300, 0x80)
1308
80795e6c
NP
1309TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
1310
1311EXC_COMMON_BEGIN(data_access_common)
1312 /*
1313 * Here r13 points to the paca, r9 contains the saved CR,
1314 * SRR0 and SRR1 are saved in r11 and r12,
1315 * r9 - r13 are saved in paca->exgen.
38555434 1316 * EX_DAR and EX_DSISR have saved DAR/DSISR
80795e6c 1317 */
d064151f 1318 EXCEPTION_COMMON(PACA_EXGEN, 0x300)
80795e6c
NP
1319 RECONCILE_IRQ_STATE(r10, r11)
1320 ld r12,_MSR(r1)
1321 ld r3,PACA_EXGEN+EX_DAR(r13)
1322 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1323 li r5,0x300
1324 std r3,_DAR(r1)
1325 std r4,_DSISR(r1)
1326BEGIN_MMU_FTR_SECTION
1327 b do_hash_page /* Try to handle as hpte fault */
1328MMU_FTR_SECTION_ELSE
1329 b handle_page_fault
1330ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1331
0ebc4cda 1332
1a6822d1 1333EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
bf66e3c4
NP
1334 SET_SCRATCH0(r13) /* save r13 */
1335 EXCEPTION_PROLOG_0 PACA_EXSLB
e779fc93 1336 b tramp_real_data_access_slb
1a6822d1 1337EXC_REAL_END(data_access_slb, 0x380, 0x80)
0ebc4cda 1338
e779fc93 1339TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
bf66e3c4 1340 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 0
38555434
NP
1341 mfspr r10,SPRN_DAR
1342 std r10,PACA_EXSLB+EX_DAR(r13)
bf66e3c4 1343 EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1
e779fc93 1344
1a6822d1 1345EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
bf66e3c4
NP
1346 SET_SCRATCH0(r13) /* save r13 */
1347 EXCEPTION_PROLOG_0 PACA_EXSLB
1348 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 0
38555434
NP
1349 mfspr r10,SPRN_DAR
1350 std r10,PACA_EXSLB+EX_DAR(r13)
bf66e3c4 1351 EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD
1a6822d1 1352EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
48e7b769 1353
2b9af6e4
NP
1354TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
1355
48e7b769 1356EXC_COMMON_BEGIN(data_access_slb_common)
d064151f 1357 EXCEPTION_COMMON(PACA_EXSLB, 0x380)
48e7b769
NP
1358 ld r4,PACA_EXSLB+EX_DAR(r13)
1359 std r4,_DAR(r1)
1360 addi r3,r1,STACK_FRAME_OVERHEAD
7100e870
NP
1361BEGIN_MMU_FTR_SECTION
1362 /* HPT case, do SLB fault */
48e7b769
NP
1363 bl do_slb_fault
1364 cmpdi r3,0
1365 bne- 1f
1366 b fast_exception_return
13671: /* Error case */
7100e870
NP
1368MMU_FTR_SECTION_ELSE
1369 /* Radix case, access is outside page table range */
1370 li r3,-EFAULT
1371ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
48e7b769
NP
1372 std r3,RESULT(r1)
1373 bl save_nvgprs
1374 RECONCILE_IRQ_STATE(r10, r11)
1375 ld r4,_DAR(r1)
1376 ld r5,RESULT(r1)
1377 addi r3,r1,STACK_FRAME_OVERHEAD
1378 bl do_bad_slb_fault
1379 b ret_from_except
1380
2b9af6e4 1381
1a6822d1
NP
1382EXC_REAL(instruction_access, 0x400, 0x80)
1383EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
27ce77df
NP
1384TRAMP_KVM(PACA_EXGEN, 0x400)
1385
1386EXC_COMMON_BEGIN(instruction_access_common)
d064151f 1387 EXCEPTION_COMMON(PACA_EXGEN, 0x400)
27ce77df
NP
1388 RECONCILE_IRQ_STATE(r10, r11)
1389 ld r12,_MSR(r1)
1390 ld r3,_NIP(r1)
475b581f 1391 andis. r4,r12,DSISR_SRR1_MATCH_64S@h
27ce77df
NP
1392 li r5,0x400
1393 std r3,_DAR(r1)
1394 std r4,_DSISR(r1)
1395BEGIN_MMU_FTR_SECTION
1396 b do_hash_page /* Try to handle as hpte fault */
1397MMU_FTR_SECTION_ELSE
1398 b handle_page_fault
1399ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1400
0ebc4cda 1401
fc557537
NP
1402__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB)
1403__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB)
48e7b769 1404TRAMP_KVM(PACA_EXSLB, 0x480)
54be0b9c 1405
48e7b769 1406EXC_COMMON_BEGIN(instruction_access_slb_common)
d064151f 1407 EXCEPTION_COMMON(PACA_EXSLB, 0x480)
48e7b769
NP
1408 ld r4,_NIP(r1)
1409 addi r3,r1,STACK_FRAME_OVERHEAD
7100e870
NP
1410BEGIN_MMU_FTR_SECTION
1411 /* HPT case, do SLB fault */
48e7b769
NP
1412 bl do_slb_fault
1413 cmpdi r3,0
1414 bne- 1f
1415 b fast_exception_return
14161: /* Error case */
7100e870
NP
1417MMU_FTR_SECTION_ELSE
1418 /* Radix case, access is outside page table range */
1419 li r3,-EFAULT
1420ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
48e7b769 1421 std r3,RESULT(r1)
8d04631a 1422 bl save_nvgprs
8d04631a 1423 RECONCILE_IRQ_STATE(r10, r11)
48e7b769
NP
1424 ld r4,_NIP(r1)
1425 ld r5,RESULT(r1)
1426 addi r3,r1,STACK_FRAME_OVERHEAD
1427 bl do_bad_slb_fault
8d04631a
NP
1428 b ret_from_except
1429
48e7b769 1430
1a6822d1 1431EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
80bd9177
NP
1432 SET_SCRATCH0(r13) /* save r13 */
1433 EXCEPTION_PROLOG_0 PACA_EXGEN
bf66e3c4
NP
1434BEGIN_FTR_SECTION
1435 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1436 EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1
1437FTR_SECTION_ELSE
1438 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1439 EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1
1440ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1a6822d1 1441EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
da2bc464 1442
1a6822d1 1443EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
80bd9177
NP
1444 SET_SCRATCH0(r13) /* save r13 */
1445 EXCEPTION_PROLOG_0 PACA_EXGEN
bf66e3c4
NP
1446BEGIN_FTR_SECTION
1447 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1448 EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV
1449FTR_SECTION_ELSE
1450 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1451 EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD
1452ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1a6822d1 1453EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
c138e588 1454
7ede5317
NP
1455TRAMP_KVM(PACA_EXGEN, 0x500)
1456TRAMP_KVM_HV(PACA_EXGEN, 0x500)
c138e588
NP
1457EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
1458
1459
e779fc93 1460EXC_REAL_BEGIN(alignment, 0x600, 0x100)
bf66e3c4
NP
1461 SET_SCRATCH0(r13) /* save r13 */
1462 EXCEPTION_PROLOG_0 PACA_EXGEN
1463 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 0
38555434
NP
1464 mfspr r10,SPRN_DAR
1465 mfspr r11,SPRN_DSISR
1466 std r10,PACA_EXGEN+EX_DAR(r13)
1467 stw r11,PACA_EXGEN+EX_DSISR(r13)
bf66e3c4 1468 EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1
e779fc93
NP
1469EXC_REAL_END(alignment, 0x600, 0x100)
1470
1471EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
bf66e3c4
NP
1472 SET_SCRATCH0(r13) /* save r13 */
1473 EXCEPTION_PROLOG_0 PACA_EXGEN
1474 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 0
38555434
NP
1475 mfspr r10,SPRN_DAR
1476 mfspr r11,SPRN_DSISR
1477 std r10,PACA_EXGEN+EX_DAR(r13)
1478 stw r11,PACA_EXGEN+EX_DSISR(r13)
bf66e3c4 1479 EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD
e779fc93
NP
1480EXC_VIRT_END(alignment, 0x4600, 0x100)
1481
da2bc464 1482TRAMP_KVM(PACA_EXGEN, 0x600)
f9aa6714 1483EXC_COMMON_BEGIN(alignment_common)
d064151f 1484 EXCEPTION_COMMON(PACA_EXGEN, 0x600)
f9aa6714
NP
1485 ld r3,PACA_EXGEN+EX_DAR(r13)
1486 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1487 std r3,_DAR(r1)
1488 std r4,_DSISR(r1)
1489 bl save_nvgprs
1490 RECONCILE_IRQ_STATE(r10, r11)
1491 addi r3,r1,STACK_FRAME_OVERHEAD
1492 bl alignment_exception
1493 b ret_from_except
1494
da2bc464 1495
1a6822d1
NP
1496EXC_REAL(program_check, 0x700, 0x100)
1497EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
da2bc464 1498TRAMP_KVM(PACA_EXGEN, 0x700)
11e87346 1499EXC_COMMON_BEGIN(program_check_common)
265e60a1
CB
1500 /*
1501 * It's possible to receive a TM Bad Thing type program check with
1502 * userspace register values (in particular r1), but with SRR1 reporting
1503 * that we came from the kernel. Normally that would confuse the bad
1504 * stack logic, and we would report a bad kernel stack pointer. Instead
1505 * we switch to the emergency stack if we're taking a TM Bad Thing from
1506 * the kernel.
1507 */
1508 li r10,MSR_PR /* Build a mask of MSR_PR .. */
1509 oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
1510 and r10,r10,r12 /* Mask SRR1 with that. */
1511 srdi r10,r10,8 /* Shift it so we can compare */
1512 cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
1513 bne 1f /* If != go to normal path. */
1514
1515 /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
1516 andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
1517 /* 3 in EXCEPTION_PROLOG_COMMON */
1518 mr r10,r1 /* Save r1 */
1519 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
1520 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
1521 b 3f /* Jump into the macro !! */
d064151f 15221: EXCEPTION_COMMON(PACA_EXGEN, 0x700)
11e87346
NP
1523 bl save_nvgprs
1524 RECONCILE_IRQ_STATE(r10, r11)
1525 addi r3,r1,STACK_FRAME_OVERHEAD
1526 bl program_check_exception
1527 b ret_from_except
1528
b01c8b54 1529
1a6822d1
NP
1530EXC_REAL(fp_unavailable, 0x800, 0x100)
1531EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
da2bc464 1532TRAMP_KVM(PACA_EXGEN, 0x800)
c78d9b97 1533EXC_COMMON_BEGIN(fp_unavailable_common)
d064151f 1534 EXCEPTION_COMMON(PACA_EXGEN, 0x800)
c78d9b97
NP
1535 bne 1f /* if from user, just load it up */
1536 bl save_nvgprs
1537 RECONCILE_IRQ_STATE(r10, r11)
1538 addi r3,r1,STACK_FRAME_OVERHEAD
1539 bl kernel_fp_unavailable_exception
1540 BUG_OPCODE
15411:
1542#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1543BEGIN_FTR_SECTION
1544 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
1545 * transaction), go do TM stuff
1546 */
1547 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1548 bne- 2f
1549END_FTR_SECTION_IFSET(CPU_FTR_TM)
1550#endif
1551 bl load_up_fpu
1552 b fast_exception_return
1553#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
15542: /* User process was in a transaction */
1555 bl save_nvgprs
1556 RECONCILE_IRQ_STATE(r10, r11)
1557 addi r3,r1,STACK_FRAME_OVERHEAD
1558 bl fp_unavailable_tm
1559 b ret_from_except
1560#endif
1561
a5d4f3ad 1562
a048a07d 1563EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
f14e953b 1564EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
39c0da57
NP
1565TRAMP_KVM(PACA_EXGEN, 0x900)
1566EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
1567
a485c709 1568
1a6822d1
NP
1569EXC_REAL_HV(hdecrementer, 0x980, 0x80)
1570EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
facc6d74
NP
1571TRAMP_KVM_HV(PACA_EXGEN, 0x980)
1572EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
1573
a5d4f3ad 1574
f14e953b
MS
1575EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
1576EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
da2bc464 1577TRAMP_KVM(PACA_EXGEN, 0xa00)
ca243163
NP
1578#ifdef CONFIG_PPC_DOORBELL
1579EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
1580#else
1581EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
1582#endif
1583
0ebc4cda 1584
1a6822d1
NP
1585EXC_REAL(trap_0b, 0xb00, 0x100)
1586EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
da2bc464 1587TRAMP_KVM(PACA_EXGEN, 0xb00)
341215dc
NP
1588EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
1589
acd7d8ce
NP
1590/*
1591 * system call / hypercall (0xc00, 0x4c00)
1592 *
1593 * The system call exception is invoked with "sc 0" and does not alter HV bit.
1594 * There is support for kernel code to invoke system calls but there are no
1595 * in-tree users.
1596 *
1597 * The hypercall is invoked with "sc 1" and sets HV=1.
1598 *
1599 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
1600 * 0x4c00 virtual mode.
1601 *
1602 * Call convention:
1603 *
1604 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
1605 *
1606 * For hypercalls, the register convention is as follows:
1607 * r0 volatile
1608 * r1-2 nonvolatile
1609 * r3 volatile parameter and return value for status
1610 * r4-r10 volatile input and output value
1611 * r11 volatile hypercall number and output value
76fc0cfc 1612 * r12 volatile input and output value
acd7d8ce
NP
1613 * r13-r31 nonvolatile
1614 * LR nonvolatile
1615 * CTR volatile
1616 * XER volatile
1617 * CR0-1 CR5-7 volatile
1618 * CR2-4 nonvolatile
1619 * Other registers nonvolatile
1620 *
1621 * The intersection of volatile registers that don't contain possible
76fc0cfc
NP
1622 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
1623 * without saving, though xer is not a good idea to use, as hardware may
1624 * interpret some bits so it may be costly to change them.
acd7d8ce 1625 */
1b4d4a79 1626.macro SYSTEM_CALL virt
bc355125 1627#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
acd7d8ce
NP
1628 /*
1629 * There is a little bit of juggling to get syscall and hcall
76fc0cfc
NP
1630 * working well. Save r13 in ctr to avoid using SPRG scratch
1631 * register.
acd7d8ce
NP
1632 *
1633 * Userspace syscalls have already saved the PPR, hcalls must save
1634 * it before setting HMT_MEDIUM.
1635 */
1b4d4a79
NP
1636 mtctr r13
1637 GET_PACA(r13)
1638 std r10,PACA_EXGEN+EX_R10(r13)
1639 INTERRUPT_TO_KERNEL
1640 KVMTEST EXC_STD 0xc00 /* uses r10, branch to do_kvm_0xc00_system_call */
1b4d4a79 1641 mfctr r9
bc355125 1642#else
1b4d4a79
NP
1643 mr r9,r13
1644 GET_PACA(r13)
1645 INTERRUPT_TO_KERNEL
bc355125 1646#endif
d807ad37 1647
727f1361 1648#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1b4d4a79
NP
1649BEGIN_FTR_SECTION
1650 cmpdi r0,0x1ebe
1651 beq- 1f
1652END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1653#endif
d807ad37 1654
b0b2a93d 1655 /* We reach here with PACA in r13, r13 in r9. */
1b4d4a79
NP
1656 mfspr r11,SPRN_SRR0
1657 mfspr r12,SPRN_SRR1
b0b2a93d
NP
1658
1659 HMT_MEDIUM
1660
1661 .if ! \virt
1b4d4a79
NP
1662 __LOAD_HANDLER(r10, system_call_common)
1663 mtspr SPRN_SRR0,r10
1664 ld r10,PACAKMSR(r13)
1665 mtspr SPRN_SRR1,r10
1666 RFI_TO_KERNEL
1667 b . /* prevent speculative execution */
1668 .else
b0b2a93d
NP
1669 li r10,MSR_RI
1670 mtmsrd r10,1 /* Set RI (EE=0) */
1b4d4a79 1671#ifdef CONFIG_RELOCATABLE
1b4d4a79
NP
1672 __LOAD_HANDLER(r10, system_call_common)
1673 mtctr r10
1b4d4a79 1674 bctr
d807ad37 1675#else
1b4d4a79
NP
1676 b system_call_common
1677#endif
1678 .endif
1679
1680#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1681 /* Fast LE/BE switch system call */
16821: mfspr r12,SPRN_SRR1
1683 xori r12,r12,MSR_LE
1684 mtspr SPRN_SRR1,r12
1685 mr r13,r9
1686 RFI_TO_USER /* return to userspace */
1687 b . /* prevent speculative execution */
d807ad37 1688#endif
1b4d4a79 1689.endm
d807ad37 1690
1a6822d1 1691EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1b4d4a79 1692 SYSTEM_CALL 0
1a6822d1 1693EXC_REAL_END(system_call, 0xc00, 0x100)
da2bc464 1694
1a6822d1 1695EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1b4d4a79 1696 SYSTEM_CALL 1
1a6822d1 1697EXC_VIRT_END(system_call, 0x4c00, 0x100)
d807ad37 1698
acd7d8ce
NP
1699#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1700 /*
1701 * This is a hcall, so register convention is as above, with these
1702 * differences:
1703 * r13 = PACA
76fc0cfc
NP
1704 * ctr = orig r13
1705 * orig r10 saved in PACA
acd7d8ce
NP
1706 */
1707TRAMP_KVM_BEGIN(do_kvm_0xc00)
1708 /*
1709 * Save the PPR (on systems that support it) before changing to
1710 * HMT_MEDIUM. That allows the KVM code to save that value into the
1711 * guest state (it is the guest's PPR value).
1712 */
76fc0cfc 1713 OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
acd7d8ce 1714 HMT_MEDIUM
76fc0cfc 1715 OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
acd7d8ce 1716 mfctr r10
76fc0cfc 1717 SET_SCRATCH0(r10)
acd7d8ce
NP
1718 std r9,PACA_EXGEN+EX_R9(r13)
1719 mfcr r9
17bdc064 1720 KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0
acd7d8ce 1721#endif
da2bc464 1722
d807ad37 1723
1a6822d1
NP
1724EXC_REAL(single_step, 0xd00, 0x100)
1725EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
da2bc464 1726TRAMP_KVM(PACA_EXGEN, 0xd00)
bc6675c6 1727EXC_COMMON(single_step_common, 0xd00, single_step_exception)
b01c8b54 1728
1a6822d1 1729EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
da0e7e62 1730EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
f5c32c1d
NP
1731TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
1732EXC_COMMON_BEGIN(h_data_storage_common)
1733 mfspr r10,SPRN_HDAR
1734 std r10,PACA_EXGEN+EX_DAR(r13)
1735 mfspr r10,SPRN_HDSISR
1736 stw r10,PACA_EXGEN+EX_DSISR(r13)
d064151f 1737 EXCEPTION_COMMON(PACA_EXGEN, 0xe00)
f5c32c1d
NP
1738 bl save_nvgprs
1739 RECONCILE_IRQ_STATE(r10, r11)
1740 addi r3,r1,STACK_FRAME_OVERHEAD
d7b45615
SJS
1741BEGIN_MMU_FTR_SECTION
1742 ld r4,PACA_EXGEN+EX_DAR(r13)
1743 lwz r5,PACA_EXGEN+EX_DSISR(r13)
1744 std r4,_DAR(r1)
1745 std r5,_DSISR(r1)
1746 li r5,SIGSEGV
1747 bl bad_page_fault
1748MMU_FTR_SECTION_ELSE
f5c32c1d 1749 bl unknown_exception
d7b45615 1750ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
f5c32c1d 1751 b ret_from_except
f5c32c1d 1752
1707dd16 1753
1a6822d1 1754EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
da0e7e62 1755EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
82517cab
NP
1756TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
1757EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
1758
1707dd16 1759
1a6822d1
NP
1760EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
1761EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
031b4026
NP
1762TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
1763EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
1764
1707dd16 1765
e0319829
NP
1766/*
1767 * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
1768 * first, and then eventaully from there to the trampoline to get into virtual
1769 * mode.
1770 */
1a6822d1 1771__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
f14e953b 1772__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
1a6822d1 1773EXC_VIRT_NONE(0x4e60, 0x20)
62f9b03b
NP
1774TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
1775TRAMP_REAL_BEGIN(hmi_exception_early)
fa4cf6b7 1776 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0
a4087a4d
NP
1777 mr r10,r1 /* Save r1 */
1778 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
62f9b03b 1779 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
62f9b03b 1780 mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
a4087a4d
NP
1781 mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
1782 EXCEPTION_PROLOG_COMMON_1()
890274c2 1783 /* We don't touch AMR here, we never go to virtual mode */
62f9b03b
NP
1784 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
1785 EXCEPTION_PROLOG_COMMON_3(0xe60)
1786 addi r3,r1,STACK_FRAME_OVERHEAD
505a314f 1787 BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
5080332c
MN
1788 cmpdi cr0,r3,0
1789
62f9b03b
NP
1790 /* Windup the stack. */
1791 /* Move original HSRR0 and HSRR1 into the respective regs */
1792 ld r9,_MSR(r1)
1793 mtspr SPRN_HSRR1,r9
1794 ld r3,_NIP(r1)
1795 mtspr SPRN_HSRR0,r3
1796 ld r9,_CTR(r1)
1797 mtctr r9
1798 ld r9,_XER(r1)
1799 mtxer r9
1800 ld r9,_LINK(r1)
1801 mtlr r9
1802 REST_GPR(0, r1)
1803 REST_8GPRS(2, r1)
1804 REST_GPR(10, r1)
1805 ld r11,_CCR(r1)
5080332c
MN
1806 REST_2GPRS(12, r1)
1807 bne 1f
62f9b03b
NP
1808 mtcr r11
1809 REST_GPR(11, r1)
5080332c 1810 ld r1,GPR1(r1)
222f20f1 1811 HRFI_TO_USER_OR_KERNEL
5080332c
MN
1812
18131: mtcr r11
1814 REST_GPR(11, r1)
62f9b03b
NP
1815 ld r1,GPR1(r1)
1816
1817 /*
1818 * Go to virtual mode and pull the HMI event information from
1819 * firmware.
1820 */
1821 .globl hmi_exception_after_realmode
1822hmi_exception_after_realmode:
1823 SET_SCRATCH0(r13)
5dba1d50 1824 EXCEPTION_PROLOG_0 PACA_EXGEN
62f9b03b
NP
1825 b tramp_real_hmi_exception
1826
/*
 * NOTE(review): virtual-mode HMI handler, entered after the real-mode
 * hmi_exception_early path above failed to fully handle the event. Sets up
 * IRQ reconciliation state, then calls the C handler handle_hmi_exception
 * with r3 = pt_regs and returns through the common exception exit.
 * (Blame-scrape artifacts preserved byte-for-byte; only comments added.)
 */
5080332c 1827EXC_COMMON_BEGIN(hmi_exception_common)
47169fba
NP
 1828	EXCEPTION_COMMON(PACA_EXGEN, 0xe60)
 1829	FINISH_NAP
 1830	bl	save_nvgprs
 1831	RECONCILE_IRQ_STATE(r10, r11)
 1832	RUNLATCH_ON
c06075f3
NP
 1833	addi	r3,r1,STACK_FRAME_OVERHEAD
 1834	bl	handle_hmi_exception
 1835	b	ret_from_except
1707dd16 1836
f14e953b
MS
1837EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
1838EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
9bcb81bf
NP
1839TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
1840#ifdef CONFIG_PPC_DOORBELL
1841EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
1842#else
1843EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
1844#endif
1845
0ebc4cda 1846
f14e953b
MS
1847EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
1848EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
74408776
NP
1849TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
1850EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
1851
9baaef0a 1852
1a6822d1
NP
1853EXC_REAL_NONE(0xec0, 0x20)
1854EXC_VIRT_NONE(0x4ec0, 0x20)
1855EXC_REAL_NONE(0xee0, 0x20)
1856EXC_VIRT_NONE(0x4ee0, 0x20)
bda7fea2 1857
0ebc4cda 1858
f442d004
MS
1859EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
1860EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
b1c7f150
NP
1861TRAMP_KVM(PACA_EXGEN, 0xf00)
1862EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
1863
0ebc4cda 1864
1a6822d1
NP
1865EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
1866EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
d1a0ca9c
NP
1867TRAMP_KVM(PACA_EXGEN, 0xf20)
/*
 * 0xf20 AltiVec unavailable.  On CPUs with AltiVec, lazily load the
 * vector state via load_up_altivec; a task with active TM transaction
 * state is diverted to altivec_unavailable_tm instead.  Otherwise (or
 * when the beq condition set up by EXCEPTION_COMMON holds — presumably
 * a kernel-mode fault; confirm against the prolog macros) raise the
 * generic Linux exception.
 */
 1868EXC_COMMON_BEGIN(altivec_unavailable_common)
d064151f 1869	EXCEPTION_COMMON(PACA_EXGEN, 0xf20)
d1a0ca9c
NP
 1870#ifdef CONFIG_ALTIVEC
 1871BEGIN_FTR_SECTION
 1872	beq	1f
 1873#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 1874  BEGIN_FTR_SECTION_NESTED(69)
 1875	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
 1876	 * transaction), go do TM stuff
 1877	 */
 1878	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
 1879	bne-	2f
 1880  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 1881#endif
 1882	bl	load_up_altivec
 1883	b	fast_exception_return
 1884#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 18852:	/* User process was in a transaction */
 1886	bl	save_nvgprs
 1887	RECONCILE_IRQ_STATE(r10, r11)
 1888	addi	r3,r1,STACK_FRAME_OVERHEAD
 1889	bl	altivec_unavailable_tm
 1890	b	ret_from_except
 1891#endif
 18921:
 1893END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 1894#endif
 1895	bl	save_nvgprs
 1896	RECONCILE_IRQ_STATE(r10, r11)
 1897	addi	r3,r1,STACK_FRAME_OVERHEAD
 1898	bl	altivec_unavailable_exception
 1899	b	ret_from_except
1900
0ebc4cda 1901
1a6822d1
NP
1902EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
1903EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
792cbddd
NP
1904TRAMP_KVM(PACA_EXGEN, 0xf40)
/*
 * 0xf40 VSX unavailable.  Mirrors altivec_unavailable_common: lazily
 * load VSX state (load_up_vsx), divert transactional tasks to
 * vsx_unavailable_tm, otherwise raise the generic Linux exception.
 */
 1905EXC_COMMON_BEGIN(vsx_unavailable_common)
d064151f 1906	EXCEPTION_COMMON(PACA_EXGEN, 0xf40)
792cbddd
NP
 1907#ifdef CONFIG_VSX
 1908BEGIN_FTR_SECTION
 1909	beq	1f
 1910#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 1911  BEGIN_FTR_SECTION_NESTED(69)
 1912	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
 1913	 * transaction), go do TM stuff
 1914	 */
 1915	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
 1916	bne-	2f
 1917  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 1918#endif
 1919	b	load_up_vsx
 1920#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 19212:	/* User process was in a transaction */
 1922	bl	save_nvgprs
 1923	RECONCILE_IRQ_STATE(r10, r11)
 1924	addi	r3,r1,STACK_FRAME_OVERHEAD
 1925	bl	vsx_unavailable_tm
 1926	b	ret_from_except
 1927#endif
 19281:
 1929END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 1930#endif
 1931	bl	save_nvgprs
 1932	RECONCILE_IRQ_STATE(r10, r11)
 1933	addi	r3,r1,STACK_FRAME_OVERHEAD
 1934	bl	vsx_unavailable_exception
 1935	b	ret_from_except
1936
da2bc464 1937
1a6822d1
NP
1938EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
1939EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
1134713c
NP
1940TRAMP_KVM(PACA_EXGEN, 0xf60)
1941EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
1942
da2bc464 1943
1a6822d1
NP
1944EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
1945EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
14b0072c
NP
1946TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
1947EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
1948
da2bc464 1949
1a6822d1
NP
1950EXC_REAL_NONE(0xfa0, 0x20)
1951EXC_VIRT_NONE(0x4fa0, 0x20)
1952EXC_REAL_NONE(0xfc0, 0x20)
1953EXC_VIRT_NONE(0x4fc0, 0x20)
1954EXC_REAL_NONE(0xfe0, 0x20)
1955EXC_VIRT_NONE(0x4fe0, 0x20)
1956
1957EXC_REAL_NONE(0x1000, 0x100)
1958EXC_VIRT_NONE(0x5000, 0x100)
1959EXC_REAL_NONE(0x1100, 0x100)
1960EXC_VIRT_NONE(0x5100, 0x100)
d0c0c9a1 1961
0ebc4cda 1962#ifdef CONFIG_CBE_RAS
1a6822d1
NP
1963EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
1964EXC_VIRT_NONE(0x5200, 0x100)
da2bc464 1965TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
ff1b3206 1966EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
da2bc464 1967#else /* CONFIG_CBE_RAS */
1a6822d1
NP
1968EXC_REAL_NONE(0x1200, 0x100)
1969EXC_VIRT_NONE(0x5200, 0x100)
da2bc464 1970#endif
b01c8b54 1971
ff1b3206 1972
1a6822d1
NP
1973EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
1974EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
da2bc464 1975TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
4e96dbbf
NP
1976EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
1977
1a6822d1
NP
1978EXC_REAL_NONE(0x1400, 0x100)
1979EXC_VIRT_NONE(0x5400, 0x100)
da2bc464 1980
/*
 * 0x1500 HV denormalisation assist.  If HSRR1 flags a denormal assist
 * (HSRR1_DENORM) and CONFIG_PPC_DENORMALISATION is on, branch to the
 * denorm_assist fast fixup; otherwise deliver as a normal HV exception
 * (denorm_common, which maps to unknown_exception).
 */
1a6822d1 1981EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
b92a66a6 1982	mtspr	SPRN_SPRG_HSCRATCH0,r13
5dba1d50 1983	EXCEPTION_PROLOG_0 PACA_EXGEN
fa4cf6b7 1984	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0
b92a66a6
MN
 1985
 1986#ifdef CONFIG_PPC_DENORMALISATION
 1987	mfspr	r10,SPRN_HSRR1
afcf0095 1988	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
afcf0095
NP
 1989	bne+	denorm_assist
 1990#endif
1e9b4507 1991
a7c1ca19 1992	KVMTEST EXC_HV 0x1500
2d046308 1993	EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1
1a6822d1 1994EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
a74599a5 1995
d7e89849 1996#ifdef CONFIG_PPC_DENORMALISATION
1a6822d1 1997EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
d7e89849 1998 b exc_real_0x1500_denorm_exception_hv
1a6822d1 1999EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
d7e89849 2000#else
1a6822d1 2001EXC_VIRT_NONE(0x5500, 0x100)
afcf0095
NP
2002#endif
2003
4bb3c7a0 2004TRAMP_KVM_HV(PACA_EXGEN, 0x1500)
b01c8b54 2005
/*
 * Fast denormalisation assist: force each affected FP/VSX register to
 * be copied to itself (fmr / xvcpsgndp), which normalises the value,
 * then back HSRR0 up by one instruction and return so the original
 * instruction re-executes with the assisted operands.
 */
b92a66a6 2006#ifdef CONFIG_PPC_DENORMALISATION
da2bc464 2007TRAMP_REAL_BEGIN(denorm_assist)
b92a66a6
MN
 2008BEGIN_FTR_SECTION
 2009/*
 2010 * To denormalise we need to move a copy of the register to itself.
 2011 * For POWER6 do that here for all FP regs.
 2012 */
 2013	mfmsr	r10
 2014	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
 2015	xori	r10,r10,(MSR_FE0|MSR_FE1)
 2016	mtmsrd	r10
 2017	sync
d7c67fb1 2018
f3c8b6c6
NP
 2019	.Lreg=0
 2020	.rept 32
 2021	fmr	.Lreg,.Lreg
 2022	.Lreg=.Lreg+1
 2023	.endr
d7c67fb1 2024
b92a66a6
MN
 2025FTR_SECTION_ELSE
 2026/*
 2027 * To denormalise we need to move a copy of the register to itself.
 2028 * For POWER7 do that here for the first 32 VSX registers only.
 2029 */
 2030	mfmsr	r10
 2031	oris	r10,r10,MSR_VSX@h
 2032	mtmsrd	r10
 2033	sync
d7c67fb1 2034
f3c8b6c6
NP
 2035	.Lreg=0
 2036	.rept 32
 2037	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
 2038	.Lreg=.Lreg+1
 2039	.endr
d7c67fb1 2040
b92a66a6 2041ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
fb0fce3e
MN
 2042
 2043BEGIN_FTR_SECTION
 2044	b	denorm_done
 2045END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 2046/*
 2047 * To denormalise we need to move a copy of the register to itself.
 2048 * For POWER8 we need to do that for all 64 VSX registers
 2049 */
f3c8b6c6
NP
 2050	.Lreg=32
 2051	.rept 32
 2052	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
 2053	.Lreg=.Lreg+1
 2054	.endr
 2055
fb0fce3e 2056denorm_done:
f14040bc
MN
	/* Step HSRR0 back one instruction so the faulting op is retried */
 2057	mfspr	r11,SPRN_HSRR0
 2058	subi	r11,r11,4
b92a66a6
MN
 2059	mtspr	SPRN_HSRR0,r11
 2060	mtcrf	0x80,r9
 2061	ld	r9,PACA_EXGEN+EX_R9(r13)
44e9309f 2062	RESTORE_PPR_PACA(PACA_EXGEN, r10)
630573c1
PM
 2063BEGIN_FTR_SECTION
 2064	ld	r10,PACA_EXGEN+EX_CFAR(r13)
 2065	mtspr	SPRN_CFAR,r10
 2066END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
b92a66a6
MN
 2067	ld	r10,PACA_EXGEN+EX_R10(r13)
 2068	ld	r11,PACA_EXGEN+EX_R11(r13)
 2069	ld	r12,PACA_EXGEN+EX_R12(r13)
 2070	ld	r13,PACA_EXGEN+EX_R13(r13)
222f20f1 2071	HRFI_TO_UNKNOWN
b92a66a6
MN
 2072	b	.
 2073#endif
2074
872e2ae4 2075EXC_COMMON(denorm_common, 0x1500, unknown_exception)
d7e89849
NP
2076
2077
2078#ifdef CONFIG_CBE_RAS
1a6822d1
NP
2079EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
2080EXC_VIRT_NONE(0x5600, 0x100)
d7e89849 2081TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
69a79344 2082EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
d7e89849 2083#else /* CONFIG_CBE_RAS */
1a6822d1
NP
2084EXC_REAL_NONE(0x1600, 0x100)
2085EXC_VIRT_NONE(0x5600, 0x100)
d7e89849
NP
2086#endif
2087
69a79344 2088
1a6822d1
NP
2089EXC_REAL(altivec_assist, 0x1700, 0x100)
2090EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
d7e89849 2091TRAMP_KVM(PACA_EXGEN, 0x1700)
b51c079e
NP
2092#ifdef CONFIG_ALTIVEC
2093EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
2094#else
2095EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
2096#endif
2097
d7e89849
NP
2098
2099#ifdef CONFIG_CBE_RAS
1a6822d1
NP
2100EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
2101EXC_VIRT_NONE(0x5800, 0x100)
d7e89849 2102TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
3965f8ab 2103EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
d7e89849 2104#else /* CONFIG_CBE_RAS */
1a6822d1
NP
2105EXC_REAL_NONE(0x1800, 0x100)
2106EXC_VIRT_NONE(0x5800, 0x100)
d7e89849
NP
2107#endif
2108
/*
 * Watchdog "soft NMI": when a masked decrementer interrupt fires, the
 * MASKED_DEC_HANDLER stub (3:) saves the remaining scratch registers
 * and enters soft_nmi_common instead of simply returning, so the
 * watchdog can run even while interrupts are soft-disabled.
 */
75eb767e 2109#ifdef CONFIG_PPC_WATCHDOG
2104180a
NP
 2110
 2111#define MASKED_DEC_HANDLER_LABEL 3f
 2112
 2113#define MASKED_DEC_HANDLER(_H)				\
 21143: /* soft-nmi */					\
 2115	std	r12,PACA_EXGEN+EX_R12(r13);		\
 2116	GET_SCRATCH0(r10);				\
 2117	std	r10,PACA_EXGEN+EX_R13(r13);		\
2d046308 2118	EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1
2104180a 2119
cc491f1d
NP
 2120/*
 2121 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 2122 * stack is one that is usable by maskable interrupts so long as MSR_EE
 2123 * remains off. It is used for recovery when something has corrupted the
 2124 * normal kernel stack, for example. The "soft NMI" must not use the process
 2125 * stack because we want irq disabled sections to avoid touching the stack
 2126 * at all (other than PMU interrupts), so use the emergency stack for this,
 2127 * and run it entirely with interrupts hard disabled.
 2128 */
2104180a
NP
 2129EXC_COMMON_BEGIN(soft_nmi_common)
 2130	mr	r10,r1
 2131	ld	r1,PACAEMERGSP(r13)
2104180a 2132	subi	r1,r1,INT_FRAME_SIZE
47169fba
NP
 2133	EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900)
 2134	bl	save_nvgprs
 2135	RECONCILE_IRQ_STATE(r10, r11)
c06075f3
NP
 2136	addi	r3,r1,STACK_FRAME_OVERHEAD
 2137	bl	soft_nmi_interrupt
2104180a
NP
 2138	b	ret_from_except
 2139
75eb767e 2140#else /* CONFIG_PPC_WATCHDOG */
2104180a
NP
 2141#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
 2142#define MASKED_DEC_HANDLER(_H)
75eb767e 2143#endif /* CONFIG_PPC_WATCHDOG */
d7e89849 2144
0ebc4cda 2145/*
fe9e1d54
IM
2146 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
2147 * - If it was a decrementer interrupt, we bump the dec to max and and return.
2148 * - If it was a doorbell we return immediately since doorbells are edge
2149 * triggered and won't automatically refire.
0869b6fd
MS
2150 * - If it was a HMI we return immediately since we handled it in realmode
2151 * and it won't refire.
6cc3f91b 2152 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
fe9e1d54 2153 * This is called with r10 containing the value to OR to the paca field.
0ebc4cda 2154 */
4508a74a
NP
 2155.macro MASKED_INTERRUPT hsrr
 2156	.if \hsrr
 2157masked_Hinterrupt:
 2158	.else
 2159masked_interrupt:
 2160	.endif
	/* Record the pending source in paca->irq_happened (r10 = flag) */
 2161	std	r11,PACA_EXGEN+EX_R11(r13)
 2162	lbz	r11,PACAIRQHAPPENED(r13)
 2163	or	r11,r11,r10
 2164	stb	r11,PACAIRQHAPPENED(r13)
 2165	cmpwi	r10,PACA_IRQ_DEC
 2166	bne	1f
	/* Decrementer: rearm to max (0x7fffffff) so it does not refire now */
 2167	lis	r10,0x7fff
 2168	ori	r10,r10,0xffff
 2169	mtspr	SPRN_DEC,r10
 2170	b	MASKED_DEC_HANDLER_LABEL
 21711:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
 2172	beq	2f
	/* Level-triggered source: clear MSR_EE in the return MSR so it
	 * stays hard-disabled until the replay, and note HARD_DIS. */
 2173	.if \hsrr
 2174	mfspr	r10,SPRN_HSRR1
 2175	xori	r10,r10,MSR_EE	/* clear MSR_EE */
 2176	mtspr	SPRN_HSRR1,r10
 2177	.else
 2178	mfspr	r10,SPRN_SRR1
 2179	xori	r10,r10,MSR_EE	/* clear MSR_EE */
 2180	mtspr	SPRN_SRR1,r10
 2181	.endif
 2182	ori	r11,r11,PACA_IRQ_HARD_DIS
 2183	stb	r11,PACAIRQHAPPENED(r13)
 21842:	/* done */
 2185	mtcrf	0x80,r9
 2186	std	r1,PACAR1(r13)
 2187	ld	r9,PACA_EXGEN+EX_R9(r13)
 2188	ld	r10,PACA_EXGEN+EX_R10(r13)
 2189	ld	r11,PACA_EXGEN+EX_R11(r13)
 2190	/* returns to kernel where r13 must be set up, so don't restore it */
 2191	.if \hsrr
 2192	HRFI_TO_KERNEL
 2193	.else
 2194	RFI_TO_KERNEL
 2195	.endif
 2196	b	.
 2197	MASKED_DEC_HANDLER(\hsrr\())
 2198.endm
57f26649 2199
a048a07d
NP
/*
 * Store-forwarding barrier fallback for CPUs without a dedicated
 * barrier: a store/sync/reload of scratch regs, the 'ori 31,31,0'
 * no-op hint, and a padded branch ladder together act as the
 * speculation barrier sequence (patched in by the stf mitigation).
 */
 2200TRAMP_REAL_BEGIN(stf_barrier_fallback)
 2201	std	r9,PACA_EXRFI+EX_R9(r13)
 2202	std	r10,PACA_EXRFI+EX_R10(r13)
 2203	sync
 2204	ld	r9,PACA_EXRFI+EX_R9(r13)
 2205	ld	r10,PACA_EXRFI+EX_R10(r13)
 2206	ori	31,31,0
 2207	.rept 14
 2208	b	1f
 22091:
 2210	.endr
 2211	blr
2212
aa8a5e00
ME
/*
 * L1D displacement-flush fallback executed on the rfid return path
 * (Meltdown-style L1D flush).  Saves scratch regs in PACA_EXRFI,
 * switches to the kernel SP saved in PACAKSAVE, then walks the
 * per-cpu fallback area with loads until the whole L1D is displaced,
 * restores everything and performs the real rfid.
 */
 2213TRAMP_REAL_BEGIN(rfi_flush_fallback)
 2214	SET_SCRATCH0(r13);
 2215	GET_PACA(r13);
78ee9946
ME
 2216	std	r1,PACA_EXRFI+EX_R12(r13)
 2217	ld	r1,PACAKSAVE(r13)
aa8a5e00
ME
 2218	std	r9,PACA_EXRFI+EX_R9(r13)
 2219	std	r10,PACA_EXRFI+EX_R10(r13)
 2220	std	r11,PACA_EXRFI+EX_R11(r13)
aa8a5e00
ME
 2221	mfctr	r9
 2222	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
bdcb1aef
NP
 2223	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
 2224	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
aa8a5e00 2225	mtctr	r11
15a3204d 2226	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
aa8a5e00
ME
 2227
 2228	/* order ld/st prior to dcbt stop all streams with flushing */
 2229	sync
bdcb1aef
NP
 2230
 2231	/*
 2232	 * The load adresses are at staggered offsets within cachelines,
 2233	 * which suits some pipelines better (on others it should not
 2234	 * hurt).
 2235	 */
 22361:
 2237	ld	r11,(0x80 + 8)*0(r10)
 2238	ld	r11,(0x80 + 8)*1(r10)
 2239	ld	r11,(0x80 + 8)*2(r10)
 2240	ld	r11,(0x80 + 8)*3(r10)
 2241	ld	r11,(0x80 + 8)*4(r10)
 2242	ld	r11,(0x80 + 8)*5(r10)
 2243	ld	r11,(0x80 + 8)*6(r10)
 2244	ld	r11,(0x80 + 8)*7(r10)
 2245	addi	r10,r10,0x80*8
aa8a5e00
ME
 2246	bdnz	1b
 2247
 2248	mtctr	r9
 2249	ld	r9,PACA_EXRFI+EX_R9(r13)
 2250	ld	r10,PACA_EXRFI+EX_R10(r13)
 2251	ld	r11,PACA_EXRFI+EX_R11(r13)
78ee9946 2252	ld	r1,PACA_EXRFI+EX_R12(r13)
aa8a5e00
ME
 2253	GET_SCRATCH0(r13);
 2254	rfid
2255
/*
 * Identical to rfi_flush_fallback above, but for the hypervisor
 * return path: ends in hrfid instead of rfid.
 */
 2256TRAMP_REAL_BEGIN(hrfi_flush_fallback)
 2257	SET_SCRATCH0(r13);
 2258	GET_PACA(r13);
78ee9946
ME
 2259	std	r1,PACA_EXRFI+EX_R12(r13)
 2260	ld	r1,PACAKSAVE(r13)
aa8a5e00
ME
 2261	std	r9,PACA_EXRFI+EX_R9(r13)
 2262	std	r10,PACA_EXRFI+EX_R10(r13)
 2263	std	r11,PACA_EXRFI+EX_R11(r13)
aa8a5e00
ME
 2264	mfctr	r9
 2265	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
bdcb1aef
NP
 2266	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
 2267	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
aa8a5e00 2268	mtctr	r11
15a3204d 2269	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
aa8a5e00
ME
 2270
 2271	/* order ld/st prior to dcbt stop all streams with flushing */
 2272	sync
bdcb1aef
NP
 2273
 2274	/*
 2275	 * The load adresses are at staggered offsets within cachelines,
 2276	 * which suits some pipelines better (on others it should not
 2277	 * hurt).
 2278	 */
 22791:
 2280	ld	r11,(0x80 + 8)*0(r10)
 2281	ld	r11,(0x80 + 8)*1(r10)
 2282	ld	r11,(0x80 + 8)*2(r10)
 2283	ld	r11,(0x80 + 8)*3(r10)
 2284	ld	r11,(0x80 + 8)*4(r10)
 2285	ld	r11,(0x80 + 8)*5(r10)
 2286	ld	r11,(0x80 + 8)*6(r10)
 2287	ld	r11,(0x80 + 8)*7(r10)
 2288	addi	r10,r10,0x80*8
aa8a5e00
ME
 2289	bdnz	1b
 2290
 2291	mtctr	r9
 2292	ld	r9,PACA_EXRFI+EX_R9(r13)
 2293	ld	r10,PACA_EXRFI+EX_R10(r13)
 2294	ld	r11,PACA_EXRFI+EX_R11(r13)
78ee9946 2295	ld	r1,PACA_EXRFI+EX_R12(r13)
aa8a5e00
ME
 2296	GET_SCRATCH0(r13);
 2297	hrfid
2298
57f26649
NP
2299/*
2300 * Real mode exceptions actually use this too, but alternate
2301 * instruction code patches (which end up in the common .text area)
2302 * cannot reach these if they are put there.
2303 */
2304USE_FIXED_SECTION(virt_trampolines)
4508a74a
NP
2305 MASKED_INTERRUPT EXC_STD
2306 MASKED_INTERRUPT EXC_HV
0ebc4cda 2307
/*
 * KVM helpers that skip the trapping instruction: advance [H]SRR0 by
 * one instruction (4 bytes) and return with [h]rfi.
 */
4f6c11db 2308#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
da2bc464 2309TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
4f6c11db
PM
 2310	/*
 2311	 * Here all GPRs are unchanged from when the interrupt happened
 2312	 * except for r13, which is saved in SPRG_SCRATCH0.
 2313	 */
 2314	mfspr	r13, SPRN_SRR0
 2315	addi	r13, r13, 4
 2316	mtspr	SPRN_SRR0, r13
 2317	GET_SCRATCH0(r13)
222f20f1 2318	RFI_TO_KERNEL
4f6c11db
PM
 2319	b	.
 2320
da2bc464 2321TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
4f6c11db
PM
 2322	/*
 2323	 * Here all GPRs are unchanged from when the interrupt happened
 2324	 * except for r13, which is saved in SPRG_SCRATCH0.
 2325	 */
 2326	mfspr	r13, SPRN_HSRR0
 2327	addi	r13, r13, 4
 2328	mtspr	SPRN_HSRR0, r13
 2329	GET_SCRATCH0(r13)
222f20f1 2330	HRFI_TO_KERNEL
4f6c11db
PM
 2331	b	.
 2332#endif
2333
0ebc4cda 2334/*
057b6d7e
HB
2335 * Ensure that any handlers that get invoked from the exception prologs
2336 * above are below the first 64KB (0x10000) of the kernel image because
2337 * the prologs assemble the addresses of these handlers using the
2338 * LOAD_HANDLER macro, which uses an ori instruction.
0ebc4cda
BH
2339 */
2340
2341/*** Common interrupt handlers ***/
2342
0ebc4cda 2343
c1fb6816
MN
2344 /*
2345 * Relocation-on interrupts: A subset of the interrupts can be delivered
2346 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
2347 * it. Addresses are the same as the original interrupt addresses, but
2348 * offset by 0xc000000000004000.
2349 * It's impossible to receive interrupts below 0x300 via this mechanism.
2350 * KVM: None of these traps are from the guest ; anything that escalated
2351 * to HV=1 from HV=0 is delivered via real mode handlers.
2352 */
2353
2354 /*
2355 * This uses the standard macro, since the original 0x300 vector
2356 * only has extra guff for STAB-based processors -- which never
2357 * come here.
2358 */
da2bc464 2359
57f26649 2360EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
b1576fec 2361 b __ppc64_runlatch_on
fe1952fc 2362
57f26649 2363USE_FIXED_SECTION(virt_trampolines)
8ed8ab40
HB
2364 /*
2365 * The __end_interrupts marker must be past the out-of-line (OOL)
2366 * handlers, so that they are copied to real address 0x100 when running
2367 * a relocatable kernel. This ensures they can be reached from the short
2368 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
2369 * directly, without using LOAD_HANDLER().
2370 */
2371 .align 7
2372 .globl __end_interrupts
2373__end_interrupts:
57f26649 2374DEFINE_FIXED_SYMBOL(__end_interrupts)
61383407 2375
/*
 * Fix up a wakeup from POWER4/970 nap: clear the nap flag bits
 * (r10) in the thread-local flags, then rewrite the saved NIP to the
 * saved LR so the idle task effectively performs a blr on return.
 */
087aa036 2376#ifdef CONFIG_PPC_970_NAP
7c8cb4b5 2377EXC_COMMON_BEGIN(power4_fixup_nap)
087aa036
CG
 2378	andc	r9,r9,r10
 2379	std	r9,TI_LOCAL_FLAGS(r11)
 2380	ld	r10,_LINK(r1)		/* make idle task do the */
 2381	std	r10,_NIP(r1)		/* equivalent of a blr */
 2382	blr
 2383#endif
2384
57f26649
NP
2385CLOSE_FIXED_SECTION(real_vectors);
2386CLOSE_FIXED_SECTION(real_trampolines);
2387CLOSE_FIXED_SECTION(virt_vectors);
2388CLOSE_FIXED_SECTION(virt_trampolines);
2389
2390USE_TEXT_SECTION()
2391
0ebc4cda
BH
2392/*
2393 * Hash table stuff
2394 */
/*
 * Hash MMU fault path: for ordinary hash faults, try to insert an
 * HPTE for the faulting address via __hash_page(); anything else
 * (bad-fault DSISR bits, DABR match, key fault, or a fault taken in
 * "NMI"/soft-disabled PMU context) is routed to the generic handlers.
 */
f4329f2e 2395	.balign	IFETCH_ALIGN_BYTES
6a3bab90 2396do_hash_page:
4e003747 2397#ifdef CONFIG_PPC_BOOK3S_64
e6c2a479 2398	lis	r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
398a719d
BH
 2399	ori	r0,r0,DSISR_BAD_FAULT_64S@l
 2400	and.	r0,r4,r0		/* weird error? */
0ebc4cda 2401	bne-	handle_page_fault	/* if not, try to insert a HPTE */
c911d2e1 2402	ld	r11, PACA_THREAD_INFO(r13)
9c1e1052
PM
 2403	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
 2404	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
 2405	bne	77f			/* then don't call hash_page now */
0ebc4cda
BH
 2406
 2407	/*
 2408	 * r3 contains the faulting address
 2409	 * r4 msr
 2410	 * r5 contains the trap number
 2411	 * r6 contains dsisr
 2412	 *
 2413	 * at return r3 = 0 for success, 1 for page fault, negative for error
 2414	 */
 2415        mr	r4,r12
 2416	ld	r6,_DSISR(r1)
106713a1
AK
 2417	bl	__hash_page		/* build HPTE if possible */
 2418        cmpdi	r3,0			/* see if __hash_page succeeded */
0ebc4cda 2419
7230c564 2420	/* Success */
0ebc4cda 2421	beq	fast_exc_return_irq	/* Return from exception on success */
0ebc4cda 2422
7230c564
BH
 2423	/* Error */
 2424	blt-	13f
d89ba535
NR
 2425
 2426	/* Reload DSISR into r4 for the DABR check below */
 2427	ld      r4,_DSISR(r1)
4e003747 2428#endif /* CONFIG_PPC_BOOK3S_64 */
9c7cc234 2429
0ebc4cda
BH
 2430/* Here we have a page fault that hash_page can't handle. */
 2431handle_page_fault:
d89ba535
NR
	/* DABR/DAWR match in DSISR means this is a data breakpoint, not a fault */
 243211:	andis.	r0,r4,DSISR_DABRMATCH@h
 2433	bne-    handle_dabr_fault
 2434	ld	r4,_DAR(r1)
0ebc4cda
BH
 2435	ld	r5,_DSISR(r1)
 2436	addi	r3,r1,STACK_FRAME_OVERHEAD
b1576fec 2437	bl	do_page_fault
0ebc4cda 2438	cmpdi	r3,0
f474c28f 2439	beq+	ret_from_except_lite
b1576fec 2440	bl	save_nvgprs
0ebc4cda
BH
	/* Non-zero return from do_page_fault: report it via bad_page_fault */
 2441	mr	r5,r3
 2442	addi	r3,r1,STACK_FRAME_OVERHEAD
 2443	lwz	r4,_DAR(r1)
b1576fec
AB
 2444	bl	bad_page_fault
 2445	b	ret_from_except
0ebc4cda 2446
a546498f
BH
 2447/* We have a data breakpoint exception - handle it */
 2448handle_dabr_fault:
b1576fec 2449	bl	save_nvgprs
a546498f
BH
 2450	ld      r4,_DAR(r1)
 2451	ld      r5,_DSISR(r1)
 2452	addi    r3,r1,STACK_FRAME_OVERHEAD
b1576fec 2453	bl      do_break
f474c28f
RB
 2454	/*
 2455	 * do_break() may have changed the NV GPRS while handling a breakpoint.
 2456	 * If so, we need to restore them with their updated values. Don't use
 2457	 * ret_from_except_lite here.
 2458	 */
 2459	b       ret_from_except
a546498f 2460
0ebc4cda 2461
4e003747 2462#ifdef CONFIG_PPC_BOOK3S_64
0ebc4cda
BH
 2463/* We have a page fault that hash_page could handle but HV refused
 2464 * the PTE insertion
 2465 */
	/* r3 (negative __hash_page rc) becomes arg r5 of low_hash_fault() */
b1576fec 246613:	bl	save_nvgprs
0ebc4cda
BH
 2467	mr	r5,r3
 2468	addi	r3,r1,STACK_FRAME_OVERHEAD
 2469	ld	r4,_DAR(r1)
b1576fec
AB
 2470	bl	low_hash_fault
 2471	b	ret_from_except
caca285e 2472#endif
0ebc4cda 2473
9c1e1052
PM
2474/*
2475 * We come here as a result of a DSI at a point where we don't want
2476 * to call hash_page, such as when we are accessing memory (possibly
2477 * user memory) inside a PMU interrupt that occurred while interrupts
2478 * were soft-disabled. We want to invoke the exception handler for
2479 * the access, or panic if there isn't a handler.
2480 */
/* 77: DSI taken where hash_page must not run (see comment above);
 * deliver SIGSEGV for the access via bad_page_fault(). */
b1576fec 248177:	bl	save_nvgprs
9c1e1052
PM
 2482	mr	r4,r3
 2483	addi	r3,r1,STACK_FRAME_OVERHEAD
 2484	li	r5,SIGSEGV
b1576fec
AB
 2485	bl	bad_page_fault
 2486	b	ret_from_except
4e2bf01b
ME
2487
2488/*
2489 * Here we have detected that the kernel stack pointer is bad.
2490 * R9 contains the saved CR, r13 points to the paca,
2491 * r10 contains the (bad) kernel stack pointer,
2492 * r11 and r12 contain the saved SRR0 and SRR1.
2493 * We switch to using an emergency stack, save the registers there,
2494 * and call kernel_bad_stack(), which panics.
2495 */
2496bad_stack:
2497 ld r1,PACAEMERGSP(r13)
2498 subi r1,r1,64+INT_FRAME_SIZE
2499 std r9,_CCR(r1)
2500 std r10,GPR1(r1)
2501 std r11,_NIP(r1)
2502 std r12,_MSR(r1)
2503 mfspr r11,SPRN_DAR
2504 mfspr r12,SPRN_DSISR
2505 std r11,_DAR(r1)
2506 std r12,_DSISR(r1)
2507 mflr r10
2508 mfctr r11
2509 mfxer r12
2510 std r10,_LINK(r1)
2511 std r11,_CTR(r1)
2512 std r12,_XER(r1)
2513 SAVE_GPR(0,r1)
2514 SAVE_GPR(2,r1)
2515 ld r10,EX_R3(r3)
2516 std r10,GPR3(r1)
2517 SAVE_GPR(4,r1)
2518 SAVE_4GPRS(5,r1)
2519 ld r9,EX_R9(r3)
2520 ld r10,EX_R10(r3)
2521 SAVE_2GPRS(9,r1)
2522 ld r9,EX_R11(r3)
2523 ld r10,EX_R12(r3)
2524 ld r11,EX_R13(r3)
2525 std r9,GPR11(r1)
2526 std r10,GPR12(r1)
2527 std r11,GPR13(r1)
2528BEGIN_FTR_SECTION
2529 ld r10,EX_CFAR(r3)
2530 std r10,ORIG_GPR3(r1)
2531END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
2532 SAVE_8GPRS(14,r1)
2533 SAVE_10GPRS(22,r1)
2534 lhz r12,PACA_TRAP_SAVE(r13)
2535 std r12,_TRAP(r1)
2536 addi r11,r1,INT_FRAME_SIZE
2537 std r11,0(r1)
2538 li r12,0
2539 std r12,0(r11)
2540 ld r2,PACATOC(r13)
2541 ld r11,exception_marker@toc(r2)
2542 std r12,RESULT(r1)
2543 std r11,STACK_FRAME_OVERHEAD-16(r1)
25441: addi r3,r1,STACK_FRAME_OVERHEAD
2545 bl kernel_bad_stack
2546 b 1b
15770a13 2547_ASM_NOKPROBE_SYMBOL(bad_stack);
0f0c6ca1 2548
a9af97aa
NP
2549/*
2550 * When doorbell is triggered from system reset wakeup, the message is
2551 * not cleared, so it would fire again when EE is enabled.
2552 *
2553 * When coming from local_irq_enable, there may be the same problem if
2554 * we were hard disabled.
2555 *
2556 * Execute msgclr to clear pending exceptions before handling it.
2557 */
/* Clear the pending (hypervisor) doorbell message, then fall into the
 * normal doorbell handler — see the comment block above. */
 2558h_doorbell_common_msgclr:
 2559	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
 2560	PPC_MSGCLR(3)
 2561	b 	h_doorbell_common
 2562
/* Same, for the supervisor-level doorbell (msgclrp). */
 2563doorbell_super_common_msgclr:
 2564	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
 2565	PPC_MSGCLRP(3)
 2566	b 	doorbell_super_common
2567
0f0c6ca1
NP
2568/*
2569 * Called from arch_local_irq_enable when an interrupt needs
2570 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
2571 * which kind of interrupt. MSR:EE is already off. We generate a
2572 * stackframe like if a real interrupt had happened.
2573 *
2574 * Note: While MSR:EE is off, we need to make sure that _MSR
2575 * in the generated frame has EE set to 1 or the exception
2576 * handler will not properly re-enable them.
b48bbb82
NP
2577 *
2578 * Note that we don't specify LR as the NIP (return address) for
2579 * the interrupt because that would unbalance the return branch
2580 * predictor.
0f0c6ca1
NP
2581 */
 2582_GLOBAL(__replay_interrupt)
 2583	/* We are going to jump to the exception common code which
 2584	 * will retrieve various register values from the PACA which
 2585	 * we don't give a damn about, so we don't bother storing them.
 2586	 */
	/* Mimic interrupt-entry state: r9 = CR, r11 = return address,
	 * r12 = MSR with EE set (see the note above about _MSR.EE) */
 2587	mfmsr	r12
3e23a12b 2588	LOAD_REG_ADDR(r11, replay_interrupt_return)
0f0c6ca1
NP
 2589	mfcr	r9
 2590	ori	r12,r12,MSR_EE
	/* Dispatch on the vector number passed in r3 */
 2591	cmpwi	r3,0x900
 2592	beq	decrementer_common
 2593	cmpwi	r3,0x500
e6c1203d
NP
BEGIN_FTR_SECTION
 2595	beq	h_virt_irq_common
FTR_SECTION_ELSE
0f0c6ca1 2597	beq	hardware_interrupt_common
e6c1203d 2598ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
f442d004
MS
 2599	cmpwi	r3,0xf00
 2600	beq	performance_monitor_common
0f0c6ca1 2601BEGIN_FTR_SECTION
d6f73fc6 2602	cmpwi	r3,0xa00
a9af97aa 2603	beq	h_doorbell_common_msgclr
0f0c6ca1
NP
 2604	cmpwi	r3,0xe60
 2605	beq	hmi_exception_common
FTR_SECTION_ELSE
 2607	cmpwi	r3,0xa00
a9af97aa 2608	beq	doorbell_super_common_msgclr
0f0c6ca1 2609ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
3e23a12b 2610replay_interrupt_return:
0f0c6ca1 2611	blr
b48bbb82 2612
15770a13 2613_ASM_NOKPROBE_SYMBOL(__replay_interrupt)