powerpc/64s/exception: simplify hmi windup code
arch/powerpc/kernel/exceptions-64s.S
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * This file contains the 64-bit "server" PowerPC variant
4 * of the low level exception handling including exception
5 * vectors, exception return, part of the slb and stab
6 * handling and other fixed offset specific things.
7 *
8 * This file is meant to be #included from head_64.S due to
25985edc 9 * position dependent assembly.
10 *
11 * Most of this originates from head_64.S and thus has the same
12 * copyright history.
13 *
14 */
15
7230c564 16#include <asm/hw_irq.h>
8aa34ab8 17#include <asm/exception-64s.h>
46f52210 18#include <asm/ptrace.h>
7cba160a 19#include <asm/cpuidle.h>
da2bc464 20#include <asm/head-64.h>
2c86cd18 21#include <asm/feature-fixups.h>
890274c2 22#include <asm/kup.h>
8aa34ab8 23
24/* PACA save area offsets (exgen, exmc, etc) */
25#define EX_R9 0
26#define EX_R10 8
27#define EX_R11 16
28#define EX_R12 24
29#define EX_R13 32
30#define EX_DAR 40
31#define EX_DSISR 48
32#define EX_CCR 52
33#define EX_CFAR 56
34#define EX_PPR 64
35#if defined(CONFIG_RELOCATABLE)
36#define EX_CTR 72
37.if EX_SIZE != 10
38 .error "EX_SIZE is wrong"
39.endif
40#else
41.if EX_SIZE != 9
42 .error "EX_SIZE is wrong"
43.endif
44#endif
45
46/*
47 * We're short on space and time in the exception prolog, so we can't
48 * use the normal LOAD_REG_IMMEDIATE macro to load the address of a label.
49 * Instead we get the base of the kernel from paca->kernelbase and or in the low
50 * part of the label. This requires that the label be within 64KB of kernelbase, and
51 * that kernelbase be 64K aligned.
52 */
53#define LOAD_HANDLER(reg, label) \
54 ld reg,PACAKBASE(r13); /* get high part of &label */ \
55 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
56
57#define __LOAD_HANDLER(reg, label) \
58 ld reg,PACAKBASE(r13); \
59 ori reg,reg,(ABS_ADDR(label))@l
60
61/*
62 * Branches from unrelocated code (e.g., interrupts) to labels outside
63 * head-y require >64K offsets.
64 */
65#define __LOAD_FAR_HANDLER(reg, label) \
66 ld reg,PACAKBASE(r13); \
67 ori reg,reg,(ABS_ADDR(label))@l; \
68 addis reg,reg,(ABS_ADDR(label))@h
69
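/*
 * Illustrative sketch (not from the original source), assuming a
 * hypothetical handler label example_common placed within the first
 * 64KB: LOAD_HANDLER(r10, example_common) expands to roughly
 *
 *	ld	r10,PACAKBASE(r13)	(64K-aligned kernel base from the paca)
 *	ori	r10,r10,<low 16 bits of example_common's absolute address>
 *
 * __LOAD_FAR_HANDLER adds an addis to fold in the next 16 bits as well,
 * which is why it can reach labels outside the first 64KB (e.g. the KVM
 * handlers) at the cost of one extra instruction.
 */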
70/* Exception register prefixes */
71#define EXC_HV 1
72#define EXC_STD 0
73
74#if defined(CONFIG_RELOCATABLE)
75/*
76 * If we support interrupts with relocation on AND we're a relocatable kernel,
77 * we need to use CTR to get to the 2nd level handler. So, save/restore it
78 * when required.
79 */
80#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
81#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
82#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
83#else
84/* ...else CTR is unused and in register. */
85#define SAVE_CTR(reg, area)
86#define GET_CTR(reg, area) mfctr reg
87#define RESTORE_CTR(reg, area)
88#endif
89
90/*
91 * PPR save/restore macros used in exceptions-64s.S
92 * Used for P7 or later processors
93 */
94#define SAVE_PPR(area, ra) \
95BEGIN_FTR_SECTION_NESTED(940) \
96 ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
97 std ra,_PPR(r1); \
98END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
99
100#define RESTORE_PPR_PACA(area, ra) \
101BEGIN_FTR_SECTION_NESTED(941) \
102 ld ra,area+EX_PPR(r13); \
103 mtspr SPRN_PPR,ra; \
104END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
105
106/*
107 * Get an SPR into a register if the CPU has the given feature
108 */
109#define OPT_GET_SPR(ra, spr, ftr) \
110BEGIN_FTR_SECTION_NESTED(943) \
111 mfspr ra,spr; \
112END_FTR_SECTION_NESTED(ftr,ftr,943)
113
114/*
115 * Set an SPR from a register if the CPU has the given feature
116 */
117#define OPT_SET_SPR(ra, spr, ftr) \
118BEGIN_FTR_SECTION_NESTED(943) \
119 mtspr spr,ra; \
120END_FTR_SECTION_NESTED(ftr,ftr,943)
121
122/*
123 * Save a register to the PACA if the CPU has the given feature
124 */
125#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
126BEGIN_FTR_SECTION_NESTED(943) \
127 std ra,offset(r13); \
128END_FTR_SECTION_NESTED(ftr,ftr,943)
129
130.macro EXCEPTION_PROLOG_0 area
131 GET_PACA(r13)
132 std r9,\area\()+EX_R9(r13) /* save r9 */
133 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
134 HMT_MEDIUM
135 std r10,\area\()+EX_R10(r13) /* save r10 - r12 */
136 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
137.endm
138
139.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, bitmask
140 OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
141 OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
142 INTERRUPT_TO_KERNEL
143 SAVE_CTR(r10, \area\())
144 mfcr r9
145 .if \kvm
146 KVMTEST \hsrr \vec
147 .endif
148 .if \bitmask
149 lbz r10,PACAIRQSOFTMASK(r13)
150 andi. r10,r10,\bitmask
151 /* Associate vector numbers with bits in paca->irq_happened */
152 .if \vec == 0x500 || \vec == 0xea0
153 li r10,PACA_IRQ_EE
154 .elseif \vec == 0x900
155 li r10,PACA_IRQ_DEC
156 .elseif \vec == 0xa00 || \vec == 0xe80
157 li r10,PACA_IRQ_DBELL
158 .elseif \vec == 0xe60
159 li r10,PACA_IRQ_HMI
160 .elseif \vec == 0xf00
161 li r10,PACA_IRQ_PMI
162 .else
163 .abort "Bad maskable vector"
164 .endif
165
166 .if \hsrr
167 bne masked_Hinterrupt
168 .else
169 bne masked_interrupt
170 .endif
171 .endif
172
173 std r11,\area\()+EX_R11(r13)
174 std r12,\area\()+EX_R12(r13)
175 GET_SCRATCH0(r10)
176 std r10,\area\()+EX_R13(r13)
177.endm
178
179.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri
180 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
181 .if ! \set_ri
182 xori r10,r10,MSR_RI /* Clear MSR_RI */
183 .endif
184 .if \hsrr
185 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
186 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
187 mtspr SPRN_HSRR1,r10
188 .else
189 mfspr r11,SPRN_SRR0 /* save SRR0 */
190 mfspr r12,SPRN_SRR1 /* and SRR1 */
191 mtspr SPRN_SRR1,r10
12a04809 192 .endif
63d60d0c 193 LOAD_HANDLER(r10, \label\())
12a04809 194 .if \hsrr
63d60d0c 195 mtspr SPRN_HSRR0,r10
196 HRFI_TO_KERNEL
197 .else
63d60d0c 198 mtspr SPRN_SRR0,r10
199 RFI_TO_KERNEL
200 .endif
201 b . /* prevent speculative execution */
202.endm
203
204.macro EXCEPTION_PROLOG_2_VIRT label, hsrr
205#ifdef CONFIG_RELOCATABLE
206 .if \hsrr
207 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
208 .else
209 mfspr r11,SPRN_SRR0 /* save SRR0 */
210 .endif
211 LOAD_HANDLER(r12, \label\())
212 mtctr r12
213 .if \hsrr
214 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
215 .else
216 mfspr r12,SPRN_SRR1 /* and SRR1 */
217 .endif
218 li r10,MSR_RI
219 mtmsrd r10,1 /* Set RI (EE=0) */
220 bctr
221#else
222 .if \hsrr
223 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
224 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
225 .else
226 mfspr r11,SPRN_SRR0 /* save SRR0 */
227 mfspr r12,SPRN_SRR1 /* and SRR1 */
228 .endif
229 li r10,MSR_RI
230 mtmsrd r10,1 /* Set RI (EE=0) */
231 b \label
232#endif
233.endm
234
235/*
236 * Branch to label using its 0xC000 address. This results in an instruction
237 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
238 * on using mtmsr rather than rfid.
239 *
240 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
241 * load KBASE for a slight optimisation.
242 */
243#define BRANCH_TO_C000(reg, label) \
244 __LOAD_HANDLER(reg, label); \
245 mtctr reg; \
246 bctr
247
248#ifdef CONFIG_RELOCATABLE
249#define BRANCH_LINK_TO_FAR(label) \
250 __LOAD_FAR_HANDLER(r12, label); \
251 mtctr r12; \
252 bctrl
253
254#else
255#define BRANCH_LINK_TO_FAR(label) \
256 bl label
257#endif
258
259#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
260#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
261/*
262 * If hv is possible, interrupts come in to the hv version
263 * of the kvmppc_interrupt code, which then jumps to the PR handler,
264 * kvmppc_interrupt_pr, if the guest is a PR guest.
265 */
266#define kvmppc_interrupt kvmppc_interrupt_hv
267#else
268#define kvmppc_interrupt kvmppc_interrupt_pr
269#endif
270
271.macro KVMTEST hsrr, n
272 lbz r10,HSTATE_IN_GUEST(r13)
273 cmpwi r10,0
274 .if \hsrr
275 bne do_kvm_H\n
276 .else
277 bne do_kvm_\n
278 .endif
279.endm
280
281.macro KVM_HANDLER area, hsrr, n, skip
282 .if \skip
283 cmpwi r10,KVM_GUEST_MODE_SKIP
284 beq 89f
285 .else
bf66e3c4 286BEGIN_FTR_SECTION_NESTED(947)
287 ld r10,\area+EX_CFAR(r13)
288 std r10,HSTATE_CFAR(r13)
bf66e3c4 289END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
290 .endif
291
bf66e3c4 292BEGIN_FTR_SECTION_NESTED(948)
293 ld r10,\area+EX_PPR(r13)
294 std r10,HSTATE_PPR(r13)
bf66e3c4 295END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
296 ld r10,\area+EX_R10(r13)
297 std r12,HSTATE_SCRATCH0(r13)
298 sldi r12,r9,32
299 /* HSRR variants have the 0x2 bit added to their trap number */
300 .if \hsrr
301 ori r12,r12,(\n + 0x2)
302 .else
303 ori r12,r12,(\n)
304 .endif
305
306#ifdef CONFIG_RELOCATABLE
307 /*
308 * KVM requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
309 * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
310 * to be saved in HSTATE_SCRATCH1.
311 */
312 mfctr r9
313 std r9,HSTATE_SCRATCH1(r13)
314 __LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
315 mtctr r9
316 ld r9,\area+EX_R9(r13)
317 bctr
318#else
319 ld r9,\area+EX_R9(r13)
320 b kvmppc_interrupt
321#endif
322
323
324 .if \skip
32589: mtocrf 0x80,r9
326 ld r9,\area+EX_R9(r13)
327 ld r10,\area+EX_R10(r13)
328 .if \hsrr
329 b kvmppc_skip_Hinterrupt
330 .else
331 b kvmppc_skip_interrupt
332 .endif
333 .endif
334.endm
335
336#else
337.macro KVMTEST hsrr, n
338.endm
339.macro KVM_HANDLER area, hsrr, n, skip
340.endm
341#endif
342
343#define EXCEPTION_PROLOG_COMMON_1() \
344 std r9,_CCR(r1); /* save CR in stackframe */ \
345 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
346 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
347 std r10,0(r1); /* make stack chain pointer */ \
348 std r0,GPR0(r1); /* save r0 in stackframe */ \
349 std r10,GPR1(r1); /* save r1 in stackframe */ \
350
351/* Save original regs values from save area to stack frame. */
352#define EXCEPTION_PROLOG_COMMON_2(area) \
353 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
354 ld r10,area+EX_R10(r13); \
355 std r9,GPR9(r1); \
356 std r10,GPR10(r1); \
357 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
358 ld r10,area+EX_R12(r13); \
359 ld r11,area+EX_R13(r13); \
360 std r9,GPR11(r1); \
361 std r10,GPR12(r1); \
362 std r11,GPR13(r1); \
bf66e3c4 363BEGIN_FTR_SECTION_NESTED(66); \
364 ld r10,area+EX_CFAR(r13); \
365 std r10,ORIG_GPR3(r1); \
bf66e3c4 366END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
367 GET_CTR(r10, area); \
368 std r10,_CTR(r1);
369
d064151f 370#define EXCEPTION_PROLOG_COMMON_3(trap) \
371 std r2,GPR2(r1); /* save r2 in stackframe */ \
372 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
373 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
374 mflr r9; /* Get LR, later save to stack */ \
375 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
376 std r9,_LINK(r1); \
377 lbz r10,PACAIRQSOFTMASK(r13); \
378 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
379 std r10,SOFTE(r1); \
380 std r11,_XER(r1); \
d064151f 381 li r9,(trap)+1; \
382 std r9,_TRAP(r1); /* set trap number */ \
383 li r10,0; \
384 ld r11,exception_marker@toc(r2); \
385 std r10,RESULT(r1); /* clear regs->result */ \
386 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
387
388/*
389 * On entry r13 points to the paca, r9-r13 are saved in the paca,
390 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
391 * SRR1, and relocation is on.
392 */
393#define EXCEPTION_COMMON(area, trap) \
394 andi. r10,r12,MSR_PR; /* See if coming from user */ \
395 mr r10,r1; /* Save r1 */ \
396 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
397 beq- 1f; \
398 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
3991: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
400 blt+ cr1,3f; /* abort if it is */ \
401 li r1,(trap); /* will be reloaded later */ \
402 sth r1,PACA_TRAP_SAVE(r13); \
403 std r3,area+EX_R3(r13); \
404 addi r3,r13,area; /* r3 -> where regs are saved*/ \
405 RESTORE_CTR(r1, area); \
406 b bad_stack; \
4073: EXCEPTION_PROLOG_COMMON_1(); \
408 kuap_save_amr_and_lock r9, r10, cr1, cr0; \
409 beq 4f; /* if from kernel mode */ \
410 ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
411 SAVE_PPR(area, r9); \
4124: EXCEPTION_PROLOG_COMMON_2(area); \
413 EXCEPTION_PROLOG_COMMON_3(trap); \
414 ACCOUNT_STOLEN_TIME
12a04809 415
416
417/*
418 * Exception where stack is already set in r1, r1 is saved in r10.
419 * PPR save and CPU accounting is not done (for some reason).
420 */
421#define EXCEPTION_COMMON_STACK(area, trap) \
422 EXCEPTION_PROLOG_COMMON_1(); \
423 kuap_save_amr_and_lock r9, r10, cr1; \
424 EXCEPTION_PROLOG_COMMON_2(area); \
425 EXCEPTION_PROLOG_COMMON_3(trap)
426
427
428#define RUNLATCH_ON \
429BEGIN_FTR_SECTION \
430 ld r3, PACA_THREAD_INFO(r13); \
431 ld r4,TI_LOCAL_FLAGS(r3); \
432 andi. r0,r4,_TLF_RUNLATCH; \
433 beql ppc64_runlatch_on_trampoline; \
434END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
435
436/*
437 * When the idle code in power4_idle puts the CPU into NAP mode,
438 * it has to do so in a loop, and relies on the external interrupt
439 * and decrementer interrupt entry code to get it out of the loop.
440 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
441 * to signal that it is in the loop and needs help to get out.
442 */
443#ifdef CONFIG_PPC_970_NAP
444#define FINISH_NAP \
445BEGIN_FTR_SECTION \
446 ld r11, PACA_THREAD_INFO(r13); \
447 ld r9,TI_LOCAL_FLAGS(r11); \
448 andi. r10,r9,_TLF_NAPPING; \
449 bnel power4_fixup_nap; \
450END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
451#else
452#define FINISH_NAP
453#endif
454
455/*
456 * Following are the BOOK3S exception handler helper macros.
457 * Handlers come in a number of types, and each type has a number of varieties.
458 *
459 * EXC_REAL_* - real, unrelocated exception vectors
460 * EXC_VIRT_* - virt (AIL), unrelocated exception vectors
461 * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
462 * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
463 * TRAMP_KVM - KVM handlers that get put into real, unrelocated
464 * EXC_COMMON - virt, relocated common handlers
465 *
466 * The EXC handlers are given a name, and branch to name_common, or the
467 * appropriate KVM or masking function. Vector handler varieties are as
468 * follows:
469 *
470 * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
471 *
472 * EXC_{REAL|VIRT} - standard exception
473 *
474 * EXC_{REAL|VIRT}_suffix
475 * where _suffix is:
476 * - _MASKABLE - maskable exception
477 * - _OOL - out of line with trampoline to common handler
478 * - _HV - HV exception
479 *
480 * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
481 *
482 * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
483 * an OOL vector that branches to a specified handler rather than the usual
484 * trampoline that goes to common. It, and other underscore macros, should
485 * be used with care.
486 *
487 * KVM handlers come in the following varieties:
488 * TRAMP_KVM
489 * TRAMP_KVM_SKIP
490 * TRAMP_KVM_HV
491 * TRAMP_KVM_HV_SKIP
492 *
493 * COMMON handlers come in the following varieties:
494 * EXC_COMMON_BEGIN/END - used to open-code the handler
495 * EXC_COMMON
496 * EXC_COMMON_ASYNC
497 *
498 * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
499 * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
500 */
501
502#define __EXC_REAL(name, start, size, area) \
503 EXC_REAL_BEGIN(name, start, size); \
504 SET_SCRATCH0(r13); /* save r13 */ \
505 EXCEPTION_PROLOG_0 area ; \
506 EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0 ; \
507 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
508 EXC_REAL_END(name, start, size)
509
510#define EXC_REAL(name, start, size) \
511 __EXC_REAL(name, start, size, PACA_EXGEN)
512
513#define __EXC_VIRT(name, start, size, realvec, area) \
514 EXC_VIRT_BEGIN(name, start, size); \
515 SET_SCRATCH0(r13); /* save r13 */ \
516 EXCEPTION_PROLOG_0 area ; \
517 EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0; \
518 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
519 EXC_VIRT_END(name, start, size)
520
521#define EXC_VIRT(name, start, size, realvec) \
522 __EXC_VIRT(name, start, size, realvec, PACA_EXGEN)
523
524#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
525 EXC_REAL_BEGIN(name, start, size); \
526 SET_SCRATCH0(r13); /* save r13 */ \
527 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
528 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, bitmask ; \
529 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
530 EXC_REAL_END(name, start, size)
531
532#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
533 EXC_VIRT_BEGIN(name, start, size); \
534 SET_SCRATCH0(r13); /* save r13 */ \
535 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
536 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, bitmask ; \
537 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
538 EXC_VIRT_END(name, start, size)
539
540#define EXC_REAL_HV(name, start, size) \
541 EXC_REAL_BEGIN(name, start, size); \
542 SET_SCRATCH0(r13); /* save r13 */ \
543 EXCEPTION_PROLOG_0 PACA_EXGEN; \
544 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0 ; \
545 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ; \
546 EXC_REAL_END(name, start, size)
547
548#define EXC_VIRT_HV(name, start, size, realvec) \
549 EXC_VIRT_BEGIN(name, start, size); \
550 SET_SCRATCH0(r13); /* save r13 */ \
551 EXCEPTION_PROLOG_0 PACA_EXGEN; \
552 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0 ; \
553 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ; \
554 EXC_VIRT_END(name, start, size)
555
556#define __EXC_REAL_OOL(name, start, size) \
557 EXC_REAL_BEGIN(name, start, size); \
558 SET_SCRATCH0(r13); \
559 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
560 b tramp_real_##name ; \
561 EXC_REAL_END(name, start, size)
562
563#define __TRAMP_REAL_OOL(name, vec) \
564 TRAMP_REAL_BEGIN(tramp_real_##name); \
565 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0 ; \
566 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
567
568#define EXC_REAL_OOL(name, start, size) \
569 __EXC_REAL_OOL(name, start, size); \
570 __TRAMP_REAL_OOL(name, start)
571
572#define __EXC_REAL_OOL_MASKABLE(name, start, size) \
573 __EXC_REAL_OOL(name, start, size)
574
575#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
576 TRAMP_REAL_BEGIN(tramp_real_##name); \
577 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, bitmask ; \
578 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
579
580#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
581 __EXC_REAL_OOL_MASKABLE(name, start, size); \
582 __TRAMP_REAL_OOL_MASKABLE(name, start, bitmask)
583
584#define __EXC_REAL_OOL_HV_DIRECT(name, start, size, handler) \
585 EXC_REAL_BEGIN(name, start, size); \
586 SET_SCRATCH0(r13); \
587 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
588 b handler; \
589 EXC_REAL_END(name, start, size)
590
591#define __EXC_REAL_OOL_HV(name, start, size) \
592 __EXC_REAL_OOL(name, start, size)
593
594#define __TRAMP_REAL_OOL_HV(name, vec) \
595 TRAMP_REAL_BEGIN(tramp_real_##name); \
596 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0 ; \
597 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
598
599#define EXC_REAL_OOL_HV(name, start, size) \
600 __EXC_REAL_OOL_HV(name, start, size); \
601 __TRAMP_REAL_OOL_HV(name, start)
602
603#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \
604 __EXC_REAL_OOL(name, start, size)
605
606#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \
607 TRAMP_REAL_BEGIN(tramp_real_##name); \
608 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, bitmask ; \
609 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
610
611#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \
612 __EXC_REAL_OOL_MASKABLE_HV(name, start, size); \
613 __TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask)
614
615#define __EXC_VIRT_OOL(name, start, size) \
616 EXC_VIRT_BEGIN(name, start, size); \
617 SET_SCRATCH0(r13); \
618 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
619 b tramp_virt_##name; \
620 EXC_VIRT_END(name, start, size)
621
622#define __TRAMP_VIRT_OOL(name, realvec) \
623 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
624 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0 ; \
625 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD
626
627#define EXC_VIRT_OOL(name, start, size, realvec) \
628 __EXC_VIRT_OOL(name, start, size); \
629 __TRAMP_VIRT_OOL(name, realvec)
630
631#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \
632 __EXC_VIRT_OOL(name, start, size)
633
634#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
635 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
636 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, bitmask ; \
637 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
638
639#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
640 __EXC_VIRT_OOL_MASKABLE(name, start, size); \
641 __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)
642
643#define __EXC_VIRT_OOL_HV(name, start, size) \
644 __EXC_VIRT_OOL(name, start, size)
645
646#define __TRAMP_VIRT_OOL_HV(name, realvec) \
647 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
648 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0 ; \
649 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
650
651#define EXC_VIRT_OOL_HV(name, start, size, realvec) \
652 __EXC_VIRT_OOL_HV(name, start, size); \
653 __TRAMP_VIRT_OOL_HV(name, realvec)
654
655#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \
656 __EXC_VIRT_OOL(name, start, size)
657
658#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \
659 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
660 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, bitmask ; \
661 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
662
663#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
664 __EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \
665 __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)
666
667#define TRAMP_KVM(area, n) \
668 TRAMP_KVM_BEGIN(do_kvm_##n); \
669 KVM_HANDLER area, EXC_STD, n, 0
670
671#define TRAMP_KVM_SKIP(area, n) \
672 TRAMP_KVM_BEGIN(do_kvm_##n); \
673 KVM_HANDLER area, EXC_STD, n, 1
674
675#define TRAMP_KVM_HV(area, n) \
676 TRAMP_KVM_BEGIN(do_kvm_H##n); \
677 KVM_HANDLER area, EXC_HV, n, 0
678
679#define TRAMP_KVM_HV_SKIP(area, n) \
680 TRAMP_KVM_BEGIN(do_kvm_H##n); \
681 KVM_HANDLER area, EXC_HV, n, 1
682
683#define EXC_COMMON(name, realvec, hdlr) \
684 EXC_COMMON_BEGIN(name); \
685 EXCEPTION_COMMON(PACA_EXGEN, realvec); \
686 bl save_nvgprs; \
687 RECONCILE_IRQ_STATE(r10, r11); \
688 addi r3,r1,STACK_FRAME_OVERHEAD; \
689 bl hdlr; \
690 b ret_from_except
691
692/*
693 * Like EXC_COMMON, but for exceptions that can occur in the idle task and
694 * therefore need the special idle handling (finish nap and runlatch)
695 */
696#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
697 EXC_COMMON_BEGIN(name); \
698 EXCEPTION_COMMON(PACA_EXGEN, realvec); \
699 FINISH_NAP; \
700 RECONCILE_IRQ_STATE(r10, r11); \
701 RUNLATCH_ON; \
702 addi r3,r1,STACK_FRAME_OVERHEAD; \
703 bl hdlr; \
704 b ret_from_except_lite
705
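/*
 * For reference, a sketch of how these macros fit together (this simply
 * summarises the trap_0b wiring that appears further down in this file):
 *
 *	EXC_REAL(trap_0b, 0xb00, 0x100)
 *	EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
 *	TRAMP_KVM(PACA_EXGEN, 0xb00)
 *	EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
 *
 * EXC_REAL/EXC_VIRT lay down the real and relocation-on entry points,
 * TRAMP_KVM provides the do_kvm_0xb00 target tested by KVMTEST, and
 * EXC_COMMON builds trap_0b_common, which calls the C handler and
 * returns via ret_from_except.
 */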
12a04809 706
0ebc4cda 707/*
708 * There are a few constraints to be concerned with.
709 * - Real mode exceptions code/data must be located at their physical location.
710 * - Virtual mode exceptions must be mapped at their 0xc000... location.
711 * - Fixed location code must not call directly beyond the __end_interrupts
712 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
713 * must be used.
714 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
715 * virtual 0xc00...
716 * - Conditional branch targets must be within +/-32K of caller.
717 *
718 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
719 * therefore don't have to run in physically located code or rfid to
720 * virtual mode kernel code. However on relocatable kernels they do have
721 * to branch to KERNELBASE offset because the rest of the kernel (outside
722 * the exception vectors) may be located elsewhere.
723 *
724 * Virtual exceptions correspond with physical, except their entry points
725 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
726 * offset applied. Virtual exceptions are enabled with the Alternate
727 * Interrupt Location (AIL) bit set in the LPCR. However this does not
728 * guarantee they will be delivered virtually. Some conditions (see the ISA)
729 * cause exceptions to be delivered in real mode.
730 *
731 * It's impossible to receive interrupts below 0x300 via AIL.
732 *
733 * KVM: None of the virtual exceptions are from the guest. Anything that
734 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
735 *
736 *
737 * We lay out physical memory as follows:
738 * 0x0000 - 0x00ff : Secondary processor spin code
739 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
740 * 0x1900 - 0x3fff : Real mode trampolines
741 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
742 * 0x5900 - 0x6fff : Relon mode trampolines
0ebc4cda 743 * 0x7000 - 0x7fff : FWNMI data area
744 * 0x8000 - .... : Common interrupt handlers, remaining early
745 * setup code, rest of kernel.
746 *
747 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
748 * is needed. Until then it's more consistent to explicitly put VIRT_NONE
749 * vectors there.
750 */
751OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
752OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
753OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
754OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
755
756#ifdef CONFIG_PPC_POWERNV
757 .globl start_real_trampolines
758 .globl end_real_trampolines
759 .globl start_virt_trampolines
760 .globl end_virt_trampolines
761#endif
762
763#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
764/*
765 * Data area reserved for FWNMI option.
766 * This address (0x7000) is fixed by the RPA.
767 * pseries and powernv need to keep the whole page from
768 * 0x7000 to 0x8000 free for use by the firmware
0ebc4cda 769 */
770ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000)
771OPEN_TEXT_SECTION(0x8000)
772#else
773OPEN_TEXT_SECTION(0x7000)
774#endif
775
776USE_FIXED_SECTION(real_vectors)
777
778/*
779 * This is the start of the interrupt handlers for pSeries
780 * This code runs with relocation off.
781 * Code from here to __end_interrupts gets copied down to real
782 * address 0x100 when we are running a relocatable kernel.
783 * Therefore any relative branches in this section must only
784 * branch to labels in this section.
785 */
786 .globl __start_interrupts
787__start_interrupts:
788
e0319829 789/* No virt vectors corresponding with 0x0..0x100 */
1a6822d1 790EXC_VIRT_NONE(0x4000, 0x100)
e0319829 791
fb479e44 792
793EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
794 SET_SCRATCH0(r13)
5dba1d50 795 EXCEPTION_PROLOG_0 PACA_EXNMI
796
797 /* This is EXCEPTION_PROLOG_1 with the idle feature section added */
798 OPT_SAVE_REG_TO_PACA(PACA_EXNMI+EX_PPR, r9, CPU_FTR_HAS_PPR)
799 OPT_SAVE_REG_TO_PACA(PACA_EXNMI+EX_CFAR, r10, CPU_FTR_CFAR)
800 INTERRUPT_TO_KERNEL
801 SAVE_CTR(r10, PACA_EXNMI)
802 mfcr r9
803
948cf67c 804#ifdef CONFIG_PPC_P7_NAP
805 /*
806 * If running native on arch 2.06 or later, check if we are waking up
807 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
808 * bits 46:47. A non-0 value indicates that we are coming from a power
809 * saving state. The idle wakeup handler initially runs in real mode,
810 * but we branch to the 0xc000... address so we can turn on relocation
811 * with mtmsr.
948cf67c 812 */
bf66e3c4 813BEGIN_FTR_SECTION
814 mfspr r10,SPRN_SRR1
815 rlwinm. r10,r10,47-31,30,31
816 beq- 1f
817 cmpwi cr1,r10,2
818 mfspr r3,SPRN_SRR1
819 bltlr cr1 /* no state loss, return to idle caller */
820 BRANCH_TO_C000(r10, system_reset_idle_common)
8211:
bf66e3c4 822END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
fb479e44 823#endif
371fefd6 824
825 KVMTEST EXC_STD 0x100
826 std r11,PACA_EXNMI+EX_R11(r13)
827 std r12,PACA_EXNMI+EX_R12(r13)
828 GET_SCRATCH0(r10)
829 std r10,PACA_EXNMI+EX_R13(r13)
830
831 EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
832 /*
833 * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
834 * being used, so a nested NMI exception would corrupt them.
835 */
fb479e44 836
837EXC_REAL_END(system_reset, 0x100, 0x100)
838EXC_VIRT_NONE(0x4100, 0x100)
6de6638b 839TRAMP_KVM(PACA_EXNMI, 0x100)
840
841#ifdef CONFIG_PPC_P7_NAP
842EXC_COMMON_BEGIN(system_reset_idle_common)
843 /*
844 * This must be a direct branch (without linker branch stub) because
845 * we can not use TOC at this point as r2 may not be restored yet.
846 */
847 b idle_return_gpr_loss
848#endif
849
a3d96f70 850EXC_COMMON_BEGIN(system_reset_common)
851 /*
852 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
853 * to recover, but nested NMI will notice in_nmi and not recover
854 * because of the use of the NMI stack. in_nmi reentrancy is tested in
855 * system_reset_exception.
856 */
857 lhz r10,PACA_IN_NMI(r13)
858 addi r10,r10,1
859 sth r10,PACA_IN_NMI(r13)
860 li r10,MSR_RI
861 mtmsrd r10,1
aca79d2b 862
863 mr r10,r1
864 ld r1,PACA_NMI_EMERG_SP(r13)
865 subi r1,r1,INT_FRAME_SIZE
866 EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100)
867 bl save_nvgprs
868 /*
869 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
870 * the right thing. We do not want to reconcile because that goes
871 * through irq tracing which we don't want in NMI.
872 *
873 * Save PACAIRQHAPPENED because some code will do a hard disable
874 * (e.g., xmon). So we want to restore this back to where it was
875 * when we return. DAR is unused in the stack, so save it there.
876 */
877 li r10,IRQS_ALL_DISABLED
878 stb r10,PACAIRQSOFTMASK(r13)
879 lbz r10,PACAIRQHAPPENED(r13)
880 std r10,_DAR(r1)
881
882 addi r3,r1,STACK_FRAME_OVERHEAD
883 bl system_reset_exception
15b4dd79 884
15b4dd79 885 /* Clear MSR_RI before setting SRR0 and SRR1. */
fbc50063 886 li r9,0
15b4dd79 887 mtmsrd r9,1
888
889 /*
15b4dd79 890 * MSR_RI is clear, now we can decrement paca->in_nmi.
891 */
892 lhz r10,PACA_IN_NMI(r13)
893 subi r10,r10,1
894 sth r10,PACA_IN_NMI(r13)
895
896 /*
897 * Restore soft mask settings.
898 */
899 ld r10,_DAR(r1)
900 stb r10,PACAIRQHAPPENED(r13)
901 ld r10,SOFTE(r1)
902 stb r10,PACAIRQSOFTMASK(r13)
903
904 /*
905 * Keep the code below in sync with MACHINE_CHECK_HANDLER_WINDUP.
906 * Should share common bits...
907 */
908
909 /* Move original SRR0 and SRR1 into the respective regs */
910 ld r9,_MSR(r1)
911 mtspr SPRN_SRR1,r9
912 ld r9,_NIP(r1)
913 mtspr SPRN_SRR0,r9
914 ld r9,_CTR(r1)
915 mtctr r9
916 ld r9,_XER(r1)
917 mtxer r9
918 ld r9,_LINK(r1)
919 mtlr r9
920 REST_GPR(0, r1)
921 REST_8GPRS(2, r1)
922 REST_GPR(10, r1)
923 ld r11,_CCR(r1)
924 mtcr r11
925 REST_GPR(11, r1)
926 REST_2GPRS(12, r1)
927 /* restore original r1. */
928 ld r1,GPR1(r1)
929 RFI_TO_USER_OR_KERNEL
930
931#ifdef CONFIG_PPC_PSERIES
932/*
933 * Vectors for the FWNMI option. Share common code.
934 */
935TRAMP_REAL_BEGIN(system_reset_fwnmi)
936 SET_SCRATCH0(r13) /* save r13 */
937 /* See comment at system_reset exception, don't turn on RI */
938 EXCEPTION_PROLOG_0 PACA_EXNMI
939 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0
940 EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
941
942#endif /* CONFIG_PPC_PSERIES */
943
0ebc4cda 944
1a6822d1 945EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
946 /* This is moved out of line as it can be patched by FW, but
947 * some code path might still want to branch into the original
948 * vector
949 */
1707dd16 950 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 951 EXCEPTION_PROLOG_0 PACA_EXMC
1e9b4507 952BEGIN_FTR_SECTION
db7d31ac 953 b machine_check_common_early
1e9b4507 954FTR_SECTION_ELSE
1707dd16 955 b machine_check_pSeries_0
1e9b4507 956ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
957EXC_REAL_END(machine_check, 0x200, 0x100)
958EXC_VIRT_NONE(0x4200, 0x100)
db7d31ac 959TRAMP_REAL_BEGIN(machine_check_common_early)
fa4cf6b7 960 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0
961 /*
962 * Register contents:
963 * R13 = PACA
964 * R9 = CR
965 * Original R9 to R13 is saved on PACA_EXMC
966 *
967 * Switch to mc_emergency stack and handle re-entrancy (we limit
968 * the nested MCE up to level 4 to avoid stack overflow).
969 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
970 *
971 * We use paca->in_mce to check whether this is the first entry or
972 * nested machine check. We increment paca->in_mce to track nested
973 * machine checks.
974 *
975 * If this is the first entry then set stack pointer to
976 * paca->mc_emergency_sp, otherwise r1 is already pointing to
977 * stack frame on mc_emergency stack.
978 *
979 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
980 * checkstop if we get another machine check exception before we do
981 * rfid with MSR_ME=1.
982 *
983 * This interrupt can wake directly from idle. If that is the case,
984 * the machine check is handled then the idle wakeup code is called
2bf1071a 985 * to restore state.
986 */
987 mr r11,r1 /* Save r1 */
988 lhz r10,PACA_IN_MCE(r13)
989 cmpwi r10,0 /* Are we in nested machine check */
990 bne 0f /* Yes, we are. */
991 /* First machine check entry */
992 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
9930: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
994 addi r10,r10,1 /* increment paca->in_mce */
995 sth r10,PACA_IN_MCE(r13)
996 /* Limit nested MCE to level 4 to avoid stack overflow */
ba41e1e1 997 cmpwi r10,MAX_MCE_DEPTH
998 bgt 2f /* Check if we hit limit of 4 */
999 std r11,GPR1(r1) /* Save r1 on the stack. */
1000 std r11,0(r1) /* make stack chain pointer */
1001 mfspr r11,SPRN_SRR0 /* Save SRR0 */
1002 std r11,_NIP(r1)
1003 mfspr r11,SPRN_SRR1 /* Save SRR1 */
1004 std r11,_MSR(r1)
1005 mfspr r11,SPRN_DAR /* Save DAR */
1006 std r11,_DAR(r1)
1007 mfspr r11,SPRN_DSISR /* Save DSISR */
1008 std r11,_DSISR(r1)
1009 std r9,_CCR(r1) /* Save CR in stackframe */
e13e7cd4 1010 /* We don't touch AMR here, we never go to virtual mode */
1011 /* Save r9 through r13 from EXMC save area to stack frame. */
1012 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
1013 mfmsr r11 /* get MSR value */
db7d31ac 1014BEGIN_FTR_SECTION
afcf0095 1015 ori r11,r11,MSR_ME /* turn on ME bit */
db7d31ac 1016END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1017 ori r11,r11,MSR_RI /* turn on RI bit */
1018 LOAD_HANDLER(r12, machine_check_handle_early)
10191: mtspr SPRN_SRR0,r12
1020 mtspr SPRN_SRR1,r11
222f20f1 1021 RFI_TO_KERNEL
1022 b . /* prevent speculative execution */
10232:
1024 /* Stack overflow. Stay on emergency stack and panic.
1025 * Keep the ME bit off while panicking, so that if we hit
1026 * another machine check we checkstop.
1027 */
1028 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
1029 ld r11,PACAKMSR(r13)
1030 LOAD_HANDLER(r12, unrecover_mce)
1031 li r10,MSR_ME
1032 andc r11,r11,r10 /* Turn off MSR_ME */
1033 b 1b
1034 b . /* prevent speculative execution */
1035
1036TRAMP_REAL_BEGIN(machine_check_pSeries)
1037 .globl machine_check_fwnmi
1038machine_check_fwnmi:
1039 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1040 EXCEPTION_PROLOG_0 PACA_EXMC
a43c1590 1041BEGIN_FTR_SECTION
db7d31ac 1042 b machine_check_common_early
a43c1590 1043END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
afcf0095 1044machine_check_pSeries_0:
fa4cf6b7 1045 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 0
afcf0095 1046 /*
1047 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
1048 * nested machine check corrupts it. machine_check_common enables
1049 * MSR_RI.
afcf0095 1050 */
2d046308 1051 EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
1052
1053TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
1054
1055EXC_COMMON_BEGIN(machine_check_common)
1056 /*
1057 * Machine check is different because we use a different
1058 * save area: PACA_EXMC instead of PACA_EXGEN.
1059 */
1060 mfspr r10,SPRN_DAR
1061 std r10,PACA_EXMC+EX_DAR(r13)
1062 mfspr r10,SPRN_DSISR
1063 stw r10,PACA_EXMC+EX_DSISR(r13)
d064151f 1064 EXCEPTION_COMMON(PACA_EXMC, 0x200)
1065 FINISH_NAP
1066 RECONCILE_IRQ_STATE(r10, r11)
1067 ld r3,PACA_EXMC+EX_DAR(r13)
1068 lwz r4,PACA_EXMC+EX_DSISR(r13)
1069 /* Enable MSR_RI when finished with PACA_EXMC */
1070 li r10,MSR_RI
1071 mtmsrd r10,1
1072 std r3,_DAR(r1)
1073 std r4,_DSISR(r1)
1074 bl save_nvgprs
1075 addi r3,r1,STACK_FRAME_OVERHEAD
1076 bl machine_check_exception
1077 b ret_from_except
1078
1079#define MACHINE_CHECK_HANDLER_WINDUP \
1080 /* Clear MSR_RI before setting SRR0 and SRR1. */\
fbc50063 1081 li r9,0; \
afcf0095 1082 mtmsrd r9,1; /* Clear MSR_RI */ \
1083 /* Decrement paca->in_mce now RI is clear. */ \
1084 lhz r12,PACA_IN_MCE(r13); \
1085 subi r12,r12,1; \
1086 sth r12,PACA_IN_MCE(r13); \
1087 /* Move original SRR0 and SRR1 into the respective regs */ \
1088 ld r9,_MSR(r1); \
1089 mtspr SPRN_SRR1,r9; \
1090 ld r9,_NIP(r1); \
1091 mtspr SPRN_SRR0,r9; \
1092 ld r9,_CTR(r1); \
1093 mtctr r9; \
1094 ld r9,_XER(r1); \
1095 mtxer r9; \
1096 ld r9,_LINK(r1); \
1097 mtlr r9; \
1098 REST_GPR(0, r1); \
1099 REST_8GPRS(2, r1); \
1100 REST_GPR(10, r1); \
1101 ld r11,_CCR(r1); \
1102 mtcr r11; \
1103 REST_GPR(11, r1); \
1104 REST_2GPRS(12, r1); \
1105 /* restore original r1. */ \
1106 ld r1,GPR1(r1)
1107
1108#ifdef CONFIG_PPC_P7_NAP
1109/*
1110 * This is an idle wakeup. Low level machine check has already been
1111 * done. Queue the event then call the idle code to do the wake up.
1112 */
1113EXC_COMMON_BEGIN(machine_check_idle_common)
1114 bl machine_check_queue_event
1115
1116 /*
1117 * We have not used any non-volatile GPRs here, and as a rule
1118 * most exception code including machine check does not.
1119 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
1120 * wakeup will restore volatile registers.
1121 *
1122 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
1123 *
1124 * Then decrement MCE nesting after finishing with the stack.
1125 */
1126 ld r3,_MSR(r1)
10d91611 1127 ld r4,_LINK(r1)
1128
1129 lhz r11,PACA_IN_MCE(r13)
1130 subi r11,r11,1
1131 sth r11,PACA_IN_MCE(r13)
1132
1133 mtlr r4
1134 rlwinm r10,r3,47-31,30,31
1135 cmpwi cr1,r10,2
1136 bltlr cr1 /* no state loss, return to idle caller */
1137 b idle_return_gpr_loss
1945bc45 1138#endif
1139 /*
1140 * Handle machine check early in real mode. We come here with
1141 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1142 */
1143EXC_COMMON_BEGIN(machine_check_handle_early)
1144 std r0,GPR0(r1) /* Save r0 */
1145 EXCEPTION_PROLOG_COMMON_3(0x200)
1146 bl save_nvgprs
1147 addi r3,r1,STACK_FRAME_OVERHEAD
1148 bl machine_check_early
1149 std r3,RESULT(r1) /* Save result */
1150 ld r12,_MSR(r1)
1151BEGIN_FTR_SECTION
1152 b 4f
1153END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
1945bc45 1154
1155#ifdef CONFIG_PPC_P7_NAP
1156 /*
1157 * Check if thread was in power saving mode. We come here when any
1158 * of the following is true:
1159 * a. thread wasn't in power saving mode
1160 * b. thread was in power saving mode with no state loss,
1161 * supervisor state loss or hypervisor state loss.
1162 *
1163 * Go back to nap/sleep/winkle mode again if (b) is true.
1164 */
bf66e3c4 1165BEGIN_FTR_SECTION
1945bc45 1166 rlwinm. r11,r12,47-31,30,31
6102c005 1167 bne machine_check_idle_common
bf66e3c4 1168END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
afcf0095 1169#endif
1945bc45 1170
1171 /*
1172 * Check if we are coming from hypervisor userspace. If yes then we
1173 * continue in host kernel in V mode to deliver the MC event.
1174 */
1175 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
1176 beq 5f
db7d31ac 11774: andi. r11,r12,MSR_PR /* See if coming from user. */
1178 bne 9f /* continue in V mode if we are. */
1179
11805:
1181#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
db7d31ac 1182BEGIN_FTR_SECTION
1183 /*
1184 * We are coming from kernel context. Check if we are coming from
1185 * guest. If yes, then we can continue. We will fall through
1186 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
1187 */
1188 lbz r11,HSTATE_IN_GUEST(r13)
1189 cmpwi r11,0 /* Check if coming from guest */
1190 bne 9f /* continue if we are. */
db7d31ac 1191END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1192#endif
1193 /*
1194 * At this point we are not sure about what context we come from.
1195 * Queue up the MCE event and return from the interrupt.
1196 * But before that, check if this is an un-recoverable exception.
1197 * If yes, then stay on emergency stack and panic.
1198 */
1199 andi. r11,r12,MSR_RI
1200 bne 2f
12011: mfspr r11,SPRN_SRR0
1202 LOAD_HANDLER(r10,unrecover_mce)
1203 mtspr SPRN_SRR0,r10
1204 ld r10,PACAKMSR(r13)
1205 /*
1206 * We are going down. But there are chances that we might get hit by
1207 * another MCE during the panic path and run into an unstable state
1208 * with no way out. Hence, turn ME bit off while going down, so that
1209 * when another MCE is hit during panic path, system will checkstop
1210 * and hypervisor will get restarted cleanly by SP.
1211 */
1212 li r3,MSR_ME
1213 andc r10,r10,r3 /* Turn off MSR_ME */
1214 mtspr SPRN_SRR1,r10
222f20f1 1215 RFI_TO_KERNEL
1216 b .
12172:
1218 /*
1219 * Check if we have successfully handled/recovered from error, if not
1220 * then stay on emergency stack and panic.
1221 */
1222 ld r3,RESULT(r1) /* Load result */
1223 cmpdi r3,0 /* see if we handled MCE successfully */
1224
1225 beq 1b /* if !handled then panic */
db7d31ac 1226BEGIN_FTR_SECTION
1227 /*
1228 * Return from MC interrupt.
1229 * Queue up the MCE event so that we can log it later, while
1230 * returning from kernel or opal call.
1231 */
1232 bl machine_check_queue_event
1233 MACHINE_CHECK_HANDLER_WINDUP
222f20f1 1234 RFI_TO_USER_OR_KERNEL
1235FTR_SECTION_ELSE
1236 /*
1237 * pSeries: Return from MC interrupt. Before that stay on emergency
1238 * stack and call machine_check_exception to log the MCE event.
1239 */
1240 LOAD_HANDLER(r10,mce_return)
1241 mtspr SPRN_SRR0,r10
1242 ld r10,PACAKMSR(r13)
1243 mtspr SPRN_SRR1,r10
1244 RFI_TO_KERNEL
1245 b .
1246ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
12479:
1248 /* Deliver the machine check to host kernel in V mode. */
1249 MACHINE_CHECK_HANDLER_WINDUP
db7d31ac 1250 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1251 EXCEPTION_PROLOG_0 PACA_EXMC
db7d31ac 1252 b machine_check_pSeries_0
1253
1254EXC_COMMON_BEGIN(unrecover_mce)
1255 /* Invoke machine_check_exception to print MCE event and panic. */
1256 addi r3,r1,STACK_FRAME_OVERHEAD
1257 bl machine_check_exception
1258 /*
1259 * We will not reach here. Even if we did, there is no way out. Call
1260 * unrecoverable_exception and die.
1261 */
12621: addi r3,r1,STACK_FRAME_OVERHEAD
1263 bl unrecoverable_exception
1264 b 1b
1265
1266EXC_COMMON_BEGIN(mce_return)
1267 /* Invoke machine_check_exception to print MCE event and return. */
1268 addi r3,r1,STACK_FRAME_OVERHEAD
1269 bl machine_check_exception
db7d31ac 1270 MACHINE_CHECK_HANDLER_WINDUP
1271 RFI_TO_KERNEL
1272 b .
0ebc4cda 1273
e779fc93 1274EXC_REAL_BEGIN(data_access, 0x300, 0x80)
1275 SET_SCRATCH0(r13) /* save r13 */
1276 EXCEPTION_PROLOG_0 PACA_EXGEN
1277 b tramp_real_data_access
1278EXC_REAL_END(data_access, 0x300, 0x80)
1279
1280TRAMP_REAL_BEGIN(tramp_real_data_access)
bf66e3c4 1281 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 0
1282 /*
1283 * DAR/DSISR must be read before setting MSR[RI], because
1284 * a d-side MCE will clobber those registers so is not
1285 * recoverable if they are live.
1286 */
1287 mfspr r10,SPRN_DAR
1288 mfspr r11,SPRN_DSISR
1289 std r10,PACA_EXGEN+EX_DAR(r13)
1290 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1291EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1
1292
1293EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
1294 SET_SCRATCH0(r13) /* save r13 */
1295 EXCEPTION_PROLOG_0 PACA_EXGEN
1296 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 0
1297 mfspr r10,SPRN_DAR
1298 mfspr r11,SPRN_DSISR
1299 std r10,PACA_EXGEN+EX_DAR(r13)
1300 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1301EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD
1302EXC_VIRT_END(data_access, 0x4300, 0x80)
1303
1304TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
1305
1306EXC_COMMON_BEGIN(data_access_common)
1307 /*
1308 * Here r13 points to the paca, r9 contains the saved CR,
1309 * SRR0 and SRR1 are saved in r11 and r12,
1310 * r9 - r13 are saved in paca->exgen.
38555434 1311 * EX_DAR and EX_DSISR have saved DAR/DSISR
80795e6c 1312 */
d064151f 1313 EXCEPTION_COMMON(PACA_EXGEN, 0x300)
1314 RECONCILE_IRQ_STATE(r10, r11)
1315 ld r12,_MSR(r1)
1316 ld r3,PACA_EXGEN+EX_DAR(r13)
1317 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1318 li r5,0x300
1319 std r3,_DAR(r1)
1320 std r4,_DSISR(r1)
1321BEGIN_MMU_FTR_SECTION
1322 b do_hash_page /* Try to handle as hpte fault */
1323MMU_FTR_SECTION_ELSE
1324 b handle_page_fault
1325ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1326
0ebc4cda 1327
1a6822d1 1328EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
1329 SET_SCRATCH0(r13) /* save r13 */
1330 EXCEPTION_PROLOG_0 PACA_EXSLB
e779fc93 1331 b tramp_real_data_access_slb
1a6822d1 1332EXC_REAL_END(data_access_slb, 0x380, 0x80)
0ebc4cda 1333
e779fc93 1334TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
bf66e3c4 1335 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 0
1336 mfspr r10,SPRN_DAR
1337 std r10,PACA_EXSLB+EX_DAR(r13)
bf66e3c4 1338 EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1
e779fc93 1339
1a6822d1 1340EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
1341 SET_SCRATCH0(r13) /* save r13 */
1342 EXCEPTION_PROLOG_0 PACA_EXSLB
1343 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 0
1344 mfspr r10,SPRN_DAR
1345 std r10,PACA_EXSLB+EX_DAR(r13)
bf66e3c4 1346 EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD
1a6822d1 1347EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
48e7b769 1348
1349TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
1350
48e7b769 1351EXC_COMMON_BEGIN(data_access_slb_common)
d064151f 1352 EXCEPTION_COMMON(PACA_EXSLB, 0x380)
1353 ld r4,PACA_EXSLB+EX_DAR(r13)
1354 std r4,_DAR(r1)
1355 addi r3,r1,STACK_FRAME_OVERHEAD
1356BEGIN_MMU_FTR_SECTION
1357 /* HPT case, do SLB fault */
1358 bl do_slb_fault
1359 cmpdi r3,0
1360 bne- 1f
1361 b fast_exception_return
13621: /* Error case */
1363MMU_FTR_SECTION_ELSE
1364 /* Radix case, access is outside page table range */
1365 li r3,-EFAULT
1366ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1367 std r3,RESULT(r1)
1368 bl save_nvgprs
1369 RECONCILE_IRQ_STATE(r10, r11)
1370 ld r4,_DAR(r1)
1371 ld r5,RESULT(r1)
1372 addi r3,r1,STACK_FRAME_OVERHEAD
1373 bl do_bad_slb_fault
1374 b ret_from_except
1375
2b9af6e4 1376
1377EXC_REAL(instruction_access, 0x400, 0x80)
1378EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
1379TRAMP_KVM(PACA_EXGEN, 0x400)
1380
1381EXC_COMMON_BEGIN(instruction_access_common)
d064151f 1382 EXCEPTION_COMMON(PACA_EXGEN, 0x400)
1383 RECONCILE_IRQ_STATE(r10, r11)
1384 ld r12,_MSR(r1)
1385 ld r3,_NIP(r1)
475b581f 1386 andis. r4,r12,DSISR_SRR1_MATCH_64S@h
1387 li r5,0x400
1388 std r3,_DAR(r1)
1389 std r4,_DSISR(r1)
1390BEGIN_MMU_FTR_SECTION
1391 b do_hash_page /* Try to handle as hpte fault */
1392MMU_FTR_SECTION_ELSE
1393 b handle_page_fault
1394ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1395
0ebc4cda 1396
1397__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB)
1398__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB)
48e7b769 1399TRAMP_KVM(PACA_EXSLB, 0x480)
54be0b9c 1400
48e7b769 1401EXC_COMMON_BEGIN(instruction_access_slb_common)
d064151f 1402 EXCEPTION_COMMON(PACA_EXSLB, 0x480)
1403 ld r4,_NIP(r1)
1404 addi r3,r1,STACK_FRAME_OVERHEAD
1405BEGIN_MMU_FTR_SECTION
1406 /* HPT case, do SLB fault */
1407 bl do_slb_fault
1408 cmpdi r3,0
1409 bne- 1f
1410 b fast_exception_return
14111: /* Error case */
1412MMU_FTR_SECTION_ELSE
1413 /* Radix case, access is outside page table range */
1414 li r3,-EFAULT
1415ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
48e7b769 1416 std r3,RESULT(r1)
8d04631a 1417 bl save_nvgprs
8d04631a 1418 RECONCILE_IRQ_STATE(r10, r11)
1419 ld r4,_NIP(r1)
1420 ld r5,RESULT(r1)
1421 addi r3,r1,STACK_FRAME_OVERHEAD
1422 bl do_bad_slb_fault
1423 b ret_from_except
1424
48e7b769 1425
1a6822d1 1426EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
1427 SET_SCRATCH0(r13) /* save r13 */
1428 EXCEPTION_PROLOG_0 PACA_EXGEN
1429BEGIN_FTR_SECTION
1430 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1431 EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1
1432FTR_SECTION_ELSE
1433 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1434 EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1
1435ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1a6822d1 1436EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
da2bc464 1437
1a6822d1 1438EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
1439 SET_SCRATCH0(r13) /* save r13 */
1440 EXCEPTION_PROLOG_0 PACA_EXGEN
1441BEGIN_FTR_SECTION
1442 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1443 EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV
1444FTR_SECTION_ELSE
1445 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
1446 EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD
1447ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1a6822d1 1448EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
c138e588 1449
1450TRAMP_KVM(PACA_EXGEN, 0x500)
1451TRAMP_KVM_HV(PACA_EXGEN, 0x500)
1452EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
1453
1454
e779fc93 1455EXC_REAL_BEGIN(alignment, 0x600, 0x100)
1456 SET_SCRATCH0(r13) /* save r13 */
1457 EXCEPTION_PROLOG_0 PACA_EXGEN
1458 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 0
1459 mfspr r10,SPRN_DAR
1460 mfspr r11,SPRN_DSISR
1461 std r10,PACA_EXGEN+EX_DAR(r13)
1462 stw r11,PACA_EXGEN+EX_DSISR(r13)
bf66e3c4 1463 EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1
1464EXC_REAL_END(alignment, 0x600, 0x100)
1465
1466EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
1467 SET_SCRATCH0(r13) /* save r13 */
1468 EXCEPTION_PROLOG_0 PACA_EXGEN
1469 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 0
1470 mfspr r10,SPRN_DAR
1471 mfspr r11,SPRN_DSISR
1472 std r10,PACA_EXGEN+EX_DAR(r13)
1473 stw r11,PACA_EXGEN+EX_DSISR(r13)
bf66e3c4 1474 EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD
1475EXC_VIRT_END(alignment, 0x4600, 0x100)
1476
da2bc464 1477TRAMP_KVM(PACA_EXGEN, 0x600)
f9aa6714 1478EXC_COMMON_BEGIN(alignment_common)
d064151f 1479 EXCEPTION_COMMON(PACA_EXGEN, 0x600)
1480 ld r3,PACA_EXGEN+EX_DAR(r13)
1481 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1482 std r3,_DAR(r1)
1483 std r4,_DSISR(r1)
1484 bl save_nvgprs
1485 RECONCILE_IRQ_STATE(r10, r11)
1486 addi r3,r1,STACK_FRAME_OVERHEAD
1487 bl alignment_exception
1488 b ret_from_except
1489
da2bc464 1490
1491EXC_REAL(program_check, 0x700, 0x100)
1492EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
da2bc464 1493TRAMP_KVM(PACA_EXGEN, 0x700)
11e87346 1494EXC_COMMON_BEGIN(program_check_common)
1495 /*
1496 * It's possible to receive a TM Bad Thing type program check with
1497 * userspace register values (in particular r1), but with SRR1 reporting
1498 * that we came from the kernel. Normally that would confuse the bad
1499 * stack logic, and we would report a bad kernel stack pointer. Instead
1500 * we switch to the emergency stack if we're taking a TM Bad Thing from
1501 * the kernel.
1502 */
1503 li r10,MSR_PR /* Build a mask of MSR_PR .. */
1504 oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
1505 and r10,r10,r12 /* Mask SRR1 with that. */
1506 srdi r10,r10,8 /* Shift it so we can compare */
1507 cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
1508 bne 1f /* If != go to normal path. */
1509
1510 /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
1511 andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
1512 /* 3 in EXCEPTION_PROLOG_COMMON */
1513 mr r10,r1 /* Save r1 */
1514 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
1515 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
1516 b 3f /* Jump into the macro !! */
d064151f 15171: EXCEPTION_COMMON(PACA_EXGEN, 0x700)
1518 bl save_nvgprs
1519 RECONCILE_IRQ_STATE(r10, r11)
1520 addi r3,r1,STACK_FRAME_OVERHEAD
1521 bl program_check_exception
1522 b ret_from_except
1523
b01c8b54 1524
1525EXC_REAL(fp_unavailable, 0x800, 0x100)
1526EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
da2bc464 1527TRAMP_KVM(PACA_EXGEN, 0x800)
c78d9b97 1528EXC_COMMON_BEGIN(fp_unavailable_common)
d064151f 1529 EXCEPTION_COMMON(PACA_EXGEN, 0x800)
1530 bne 1f /* if from user, just load it up */
1531 bl save_nvgprs
1532 RECONCILE_IRQ_STATE(r10, r11)
1533 addi r3,r1,STACK_FRAME_OVERHEAD
1534 bl kernel_fp_unavailable_exception
1535 BUG_OPCODE
15361:
1537#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1538BEGIN_FTR_SECTION
1539 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
1540 * transaction), go do TM stuff
1541 */
1542 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1543 bne- 2f
1544END_FTR_SECTION_IFSET(CPU_FTR_TM)
1545#endif
1546 bl load_up_fpu
1547 b fast_exception_return
1548#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
15492: /* User process was in a transaction */
1550 bl save_nvgprs
1551 RECONCILE_IRQ_STATE(r10, r11)
1552 addi r3,r1,STACK_FRAME_OVERHEAD
1553 bl fp_unavailable_tm
1554 b ret_from_except
1555#endif
1556
a5d4f3ad 1557
a048a07d 1558EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
f14e953b 1559EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
1560TRAMP_KVM(PACA_EXGEN, 0x900)
1561EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
1562
a485c709 1563
1564EXC_REAL_HV(hdecrementer, 0x980, 0x80)
1565EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
1566TRAMP_KVM_HV(PACA_EXGEN, 0x980)
1567EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
1568
a5d4f3ad 1569
1570EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
1571EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
da2bc464 1572TRAMP_KVM(PACA_EXGEN, 0xa00)
1573#ifdef CONFIG_PPC_DOORBELL
1574EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
1575#else
1576EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
1577#endif
1578
0ebc4cda 1579
1580EXC_REAL(trap_0b, 0xb00, 0x100)
1581EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
da2bc464 1582TRAMP_KVM(PACA_EXGEN, 0xb00)
1583EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
1584
1585/*
1586 * system call / hypercall (0xc00, 0x4c00)
1587 *
1588 * The system call exception is invoked with "sc 0" and does not alter HV bit.
1589 * There is support for kernel code to invoke system calls but there are no
1590 * in-tree users.
1591 *
1592 * The hypercall is invoked with "sc 1" and sets HV=1.
1593 *
1594 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
1595 * 0x4c00 virtual mode.
1596 *
1597 * Call convention:
1598 *
1599 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
1600 *
1601 * For hypercalls, the register convention is as follows:
1602 * r0 volatile
1603 * r1-2 nonvolatile
1604 * r3 volatile parameter and return value for status
1605 * r4-r10 volatile input and output value
1606 * r11 volatile hypercall number and output value
76fc0cfc 1607 * r12 volatile input and output value
1608 * r13-r31 nonvolatile
1609 * LR nonvolatile
1610 * CTR volatile
1611 * XER volatile
1612 * CR0-1 CR5-7 volatile
1613 * CR2-4 nonvolatile
1614 * Other registers nonvolatile
1615 *
1616 * The intersection of volatile registers that don't contain possible
76fc0cfc
NP
 1617 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 1618 * without saving, though xer is best avoided, since hardware may
 1619 * interpret some of its bits and changing them can be costly.
acd7d8ce 1620 */
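/*
 * Purely as an illustration of the convention above (not code used anywhere
 * in this file): a guest issues a hypercall by loading its arguments into
 * the volatile input registers listed above and executing
 *
 *	sc	1
 *
 * which traps here with HV=1; on return, r3 carries the status back to the
 * guest. The hypercall numbers and argument layout are defined by the
 * hypervisor ABI, not by this file.
 */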
1b4d4a79 1621.macro SYSTEM_CALL virt
bc355125 1622#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
acd7d8ce
NP
1623 /*
1624 * There is a little bit of juggling to get syscall and hcall
76fc0cfc
NP
 1625 * working well. Save r13 in ctr to avoid using an SPRG scratch
1626 * register.
acd7d8ce
NP
1627 *
 1628 * Userspace syscalls have already saved the PPR; hcalls must save
1629 * it before setting HMT_MEDIUM.
1630 */
1b4d4a79
NP
1631 mtctr r13
1632 GET_PACA(r13)
1633 std r10,PACA_EXGEN+EX_R10(r13)
1634 INTERRUPT_TO_KERNEL
1635 KVMTEST EXC_STD 0xc00 /* uses r10, branch to do_kvm_0xc00_system_call */
1b4d4a79 1636 mfctr r9
bc355125 1637#else
1b4d4a79
NP
1638 mr r9,r13
1639 GET_PACA(r13)
1640 INTERRUPT_TO_KERNEL
bc355125 1641#endif
d807ad37 1642
727f1361 1643#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1b4d4a79
NP
1644BEGIN_FTR_SECTION
1645 cmpdi r0,0x1ebe
1646 beq- 1f
1647END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1648#endif
d807ad37 1649
b0b2a93d 1650 /* We reach here with PACA in r13, r13 in r9. */
1b4d4a79
NP
1651 mfspr r11,SPRN_SRR0
1652 mfspr r12,SPRN_SRR1
b0b2a93d
NP
1653
1654 HMT_MEDIUM
1655
1656 .if ! \virt
1b4d4a79
NP
1657 __LOAD_HANDLER(r10, system_call_common)
1658 mtspr SPRN_SRR0,r10
1659 ld r10,PACAKMSR(r13)
1660 mtspr SPRN_SRR1,r10
1661 RFI_TO_KERNEL
1662 b . /* prevent speculative execution */
1663 .else
b0b2a93d
NP
1664 li r10,MSR_RI
1665 mtmsrd r10,1 /* Set RI (EE=0) */
1b4d4a79 1666#ifdef CONFIG_RELOCATABLE
1b4d4a79
NP
1667 __LOAD_HANDLER(r10, system_call_common)
1668 mtctr r10
1b4d4a79 1669 bctr
d807ad37 1670#else
1b4d4a79
NP
1671 b system_call_common
1672#endif
1673 .endif
1674
1675#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1676 /* Fast LE/BE switch system call */
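	/*
	 * Reached when r0 == 0x1ebe (tested earlier in this macro on CPUs
	 * with CPU_FTR_REAL_LE): flip MSR_LE in the saved SRR1 and return
	 * straight to userspace, so the task resumes in the opposite
	 * endianness without going through the normal system call path.
	 */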
16771: mfspr r12,SPRN_SRR1
1678 xori r12,r12,MSR_LE
1679 mtspr SPRN_SRR1,r12
1680 mr r13,r9
1681 RFI_TO_USER /* return to userspace */
1682 b . /* prevent speculative execution */
d807ad37 1683#endif
1b4d4a79 1684.endm
d807ad37 1685
1a6822d1 1686EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1b4d4a79 1687 SYSTEM_CALL 0
1a6822d1 1688EXC_REAL_END(system_call, 0xc00, 0x100)
da2bc464 1689
1a6822d1 1690EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1b4d4a79 1691 SYSTEM_CALL 1
1a6822d1 1692EXC_VIRT_END(system_call, 0x4c00, 0x100)
d807ad37 1693
acd7d8ce
NP
1694#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1695 /*
1696 * This is a hcall, so register convention is as above, with these
1697 * differences:
1698 * r13 = PACA
76fc0cfc
NP
1699 * ctr = orig r13
1700 * orig r10 saved in PACA
acd7d8ce
NP
1701 */
1702TRAMP_KVM_BEGIN(do_kvm_0xc00)
1703 /*
1704 * Save the PPR (on systems that support it) before changing to
1705 * HMT_MEDIUM. That allows the KVM code to save that value into the
1706 * guest state (it is the guest's PPR value).
1707 */
76fc0cfc 1708 OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
acd7d8ce 1709 HMT_MEDIUM
76fc0cfc 1710 OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
acd7d8ce 1711 mfctr r10
76fc0cfc 1712 SET_SCRATCH0(r10)
acd7d8ce
NP
1713 std r9,PACA_EXGEN+EX_R9(r13)
1714 mfcr r9
17bdc064 1715 KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0
acd7d8ce 1716#endif
da2bc464 1717
d807ad37 1718
1a6822d1
NP
1719EXC_REAL(single_step, 0xd00, 0x100)
1720EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
da2bc464 1721TRAMP_KVM(PACA_EXGEN, 0xd00)
bc6675c6 1722EXC_COMMON(single_step_common, 0xd00, single_step_exception)
b01c8b54 1723
1a6822d1 1724EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
da0e7e62 1725EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
f5c32c1d
NP
1726TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
1727EXC_COMMON_BEGIN(h_data_storage_common)
1728 mfspr r10,SPRN_HDAR
1729 std r10,PACA_EXGEN+EX_DAR(r13)
1730 mfspr r10,SPRN_HDSISR
1731 stw r10,PACA_EXGEN+EX_DSISR(r13)
d064151f 1732 EXCEPTION_COMMON(PACA_EXGEN, 0xe00)
f5c32c1d
NP
1733 bl save_nvgprs
1734 RECONCILE_IRQ_STATE(r10, r11)
1735 addi r3,r1,STACK_FRAME_OVERHEAD
d7b45615
SJS
1736BEGIN_MMU_FTR_SECTION
1737 ld r4,PACA_EXGEN+EX_DAR(r13)
1738 lwz r5,PACA_EXGEN+EX_DSISR(r13)
1739 std r4,_DAR(r1)
1740 std r5,_DSISR(r1)
1741 li r5,SIGSEGV
1742 bl bad_page_fault
1743MMU_FTR_SECTION_ELSE
f5c32c1d 1744 bl unknown_exception
d7b45615 1745ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
f5c32c1d 1746 b ret_from_except
f5c32c1d 1747
1707dd16 1748
1a6822d1 1749EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
da0e7e62 1750EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
82517cab
NP
1751TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
1752EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
1753
1707dd16 1754
1a6822d1
NP
1755EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
1756EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
031b4026
NP
1757TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
1758EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
1759
1707dd16 1760
e0319829
NP
1761/*
1762 * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
 1763 * first, and then eventually from there to the trampoline to get into virtual
1764 * mode.
1765 */
1a6822d1 1766__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
f14e953b 1767__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
1a6822d1 1768EXC_VIRT_NONE(0x4e60, 0x20)
62f9b03b
NP
1769TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
1770TRAMP_REAL_BEGIN(hmi_exception_early)
fa4cf6b7 1771 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0
a4087a4d
NP
1772 mr r10,r1 /* Save r1 */
1773 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
62f9b03b 1774 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
62f9b03b 1775 mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
a4087a4d
NP
1776 mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
1777 EXCEPTION_PROLOG_COMMON_1()
890274c2 1778 /* We don't touch AMR here, we never go to virtual mode */
62f9b03b
NP
1779 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
1780 EXCEPTION_PROLOG_COMMON_3(0xe60)
1781 addi r3,r1,STACK_FRAME_OVERHEAD
505a314f 1782 BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
5080332c 1783 cmpdi cr0,r3,0
67d4160a 1784 bne 1f
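	/*
	 * A zero return from hmi_exception_realmode means no further handling
	 * is required: wind up the frame below and return with HRFI. A
	 * non-zero return takes the 1: path, which also winds up but then
	 * re-enters via the trampoline to handle the event in virtual mode.
	 */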
5080332c 1785
62f9b03b
NP
1786 /* Windup the stack. */
1787 /* Move original HSRR0 and HSRR1 into the respective regs */
1788 ld r9,_MSR(r1)
1789 mtspr SPRN_HSRR1,r9
9592b29a
NP
1790 ld r9,_NIP(r1)
1791 mtspr SPRN_HSRR0,r9
62f9b03b
NP
1792 ld r9,_CTR(r1)
1793 mtctr r9
1794 ld r9,_XER(r1)
1795 mtxer r9
1796 ld r9,_LINK(r1)
1797 mtlr r9
1798 REST_GPR(0, r1)
1799 REST_8GPRS(2, r1)
1800 REST_GPR(10, r1)
1801 ld r11,_CCR(r1)
5080332c 1802 REST_2GPRS(12, r1)
62f9b03b
NP
1803 mtcr r11
1804 REST_GPR(11, r1)
5080332c 1805 ld r1,GPR1(r1)
222f20f1 1806 HRFI_TO_USER_OR_KERNEL
5080332c 1807
67d4160a
NP
18081:
1809 ld r9,_MSR(r1)
1810 mtspr SPRN_HSRR1,r9
1811 ld r9,_NIP(r1)
1812 mtspr SPRN_HSRR0,r9
1813 ld r9,_CTR(r1)
1814 mtctr r9
1815 ld r9,_XER(r1)
1816 mtxer r9
1817 ld r9,_LINK(r1)
1818 mtlr r9
1819 REST_GPR(0, r1)
1820 REST_8GPRS(2, r1)
1821 REST_GPR(10, r1)
1822 ld r11,_CCR(r1)
1823 REST_2GPRS(12, r1)
1824 mtcr r11
5080332c 1825 REST_GPR(11, r1)
62f9b03b
NP
1826 ld r1,GPR1(r1)
1827
1828 /*
1829 * Go to virtual mode and pull the HMI event information from
1830 * firmware.
1831 */
62f9b03b 1832 SET_SCRATCH0(r13)
5dba1d50 1833 EXCEPTION_PROLOG_0 PACA_EXGEN
62f9b03b
NP
1834 b tramp_real_hmi_exception
1835
5080332c 1836EXC_COMMON_BEGIN(hmi_exception_common)
47169fba
NP
1837 EXCEPTION_COMMON(PACA_EXGEN, 0xe60)
1838 FINISH_NAP
1839 bl save_nvgprs
1840 RECONCILE_IRQ_STATE(r10, r11)
1841 RUNLATCH_ON
c06075f3
NP
1842 addi r3,r1,STACK_FRAME_OVERHEAD
1843 bl handle_hmi_exception
1844 b ret_from_except
1707dd16 1845
f14e953b
MS
1846EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
1847EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
9bcb81bf
NP
1848TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
1849#ifdef CONFIG_PPC_DOORBELL
1850EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
1851#else
1852EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
1853#endif
1854
0ebc4cda 1855
f14e953b
MS
1856EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
1857EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
74408776
NP
1858TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
1859EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
1860
9baaef0a 1861
1a6822d1
NP
1862EXC_REAL_NONE(0xec0, 0x20)
1863EXC_VIRT_NONE(0x4ec0, 0x20)
1864EXC_REAL_NONE(0xee0, 0x20)
1865EXC_VIRT_NONE(0x4ee0, 0x20)
bda7fea2 1866
0ebc4cda 1867
f442d004
MS
1868EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
1869EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
b1c7f150
NP
1870TRAMP_KVM(PACA_EXGEN, 0xf00)
1871EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
1872
0ebc4cda 1873
1a6822d1
NP
1874EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
1875EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
d1a0ca9c
NP
1876TRAMP_KVM(PACA_EXGEN, 0xf20)
1877EXC_COMMON_BEGIN(altivec_unavailable_common)
d064151f 1878 EXCEPTION_COMMON(PACA_EXGEN, 0xf20)
d1a0ca9c
NP
1879#ifdef CONFIG_ALTIVEC
1880BEGIN_FTR_SECTION
1881 beq 1f
1882#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1883 BEGIN_FTR_SECTION_NESTED(69)
 1884	/* Test if the 2 TM state bits are zero. If non-zero (i.e. userspace was
 1885	 * in a transaction), go do the TM handling.
 1886	 */
1887 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1888 bne- 2f
1889 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1890#endif
1891 bl load_up_altivec
1892 b fast_exception_return
1893#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
18942: /* User process was in a transaction */
1895 bl save_nvgprs
1896 RECONCILE_IRQ_STATE(r10, r11)
1897 addi r3,r1,STACK_FRAME_OVERHEAD
1898 bl altivec_unavailable_tm
1899 b ret_from_except
1900#endif
19011:
1902END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1903#endif
1904 bl save_nvgprs
1905 RECONCILE_IRQ_STATE(r10, r11)
1906 addi r3,r1,STACK_FRAME_OVERHEAD
1907 bl altivec_unavailable_exception
1908 b ret_from_except
1909
0ebc4cda 1910
1a6822d1
NP
1911EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
1912EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
792cbddd
NP
1913TRAMP_KVM(PACA_EXGEN, 0xf40)
1914EXC_COMMON_BEGIN(vsx_unavailable_common)
d064151f 1915 EXCEPTION_COMMON(PACA_EXGEN, 0xf40)
792cbddd
NP
1916#ifdef CONFIG_VSX
1917BEGIN_FTR_SECTION
1918 beq 1f
1919#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1920 BEGIN_FTR_SECTION_NESTED(69)
 1921	/* Test if the 2 TM state bits are zero. If non-zero (i.e. userspace was
 1922	 * in a transaction), go do the TM handling.
 1923	 */
1924 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1925 bne- 2f
1926 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1927#endif
1928 b load_up_vsx
1929#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
19302: /* User process was in a transaction */
1931 bl save_nvgprs
1932 RECONCILE_IRQ_STATE(r10, r11)
1933 addi r3,r1,STACK_FRAME_OVERHEAD
1934 bl vsx_unavailable_tm
1935 b ret_from_except
1936#endif
19371:
1938END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1939#endif
1940 bl save_nvgprs
1941 RECONCILE_IRQ_STATE(r10, r11)
1942 addi r3,r1,STACK_FRAME_OVERHEAD
1943 bl vsx_unavailable_exception
1944 b ret_from_except
1945
da2bc464 1946
1a6822d1
NP
1947EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
1948EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
1134713c
NP
1949TRAMP_KVM(PACA_EXGEN, 0xf60)
1950EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
1951
da2bc464 1952
1a6822d1
NP
1953EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
1954EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
14b0072c
NP
1955TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
1956EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
1957
da2bc464 1958
1a6822d1
NP
1959EXC_REAL_NONE(0xfa0, 0x20)
1960EXC_VIRT_NONE(0x4fa0, 0x20)
1961EXC_REAL_NONE(0xfc0, 0x20)
1962EXC_VIRT_NONE(0x4fc0, 0x20)
1963EXC_REAL_NONE(0xfe0, 0x20)
1964EXC_VIRT_NONE(0x4fe0, 0x20)
1965
1966EXC_REAL_NONE(0x1000, 0x100)
1967EXC_VIRT_NONE(0x5000, 0x100)
1968EXC_REAL_NONE(0x1100, 0x100)
1969EXC_VIRT_NONE(0x5100, 0x100)
d0c0c9a1 1970
0ebc4cda 1971#ifdef CONFIG_CBE_RAS
1a6822d1
NP
1972EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
1973EXC_VIRT_NONE(0x5200, 0x100)
da2bc464 1974TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
ff1b3206 1975EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
da2bc464 1976#else /* CONFIG_CBE_RAS */
1a6822d1
NP
1977EXC_REAL_NONE(0x1200, 0x100)
1978EXC_VIRT_NONE(0x5200, 0x100)
da2bc464 1979#endif
b01c8b54 1980
ff1b3206 1981
1a6822d1
NP
1982EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
1983EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
da2bc464 1984TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
4e96dbbf
NP
1985EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
1986
1a6822d1
NP
1987EXC_REAL_NONE(0x1400, 0x100)
1988EXC_VIRT_NONE(0x5400, 0x100)
da2bc464 1989
1a6822d1 1990EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
b92a66a6 1991 mtspr SPRN_SPRG_HSCRATCH0,r13
5dba1d50 1992 EXCEPTION_PROLOG_0 PACA_EXGEN
fa4cf6b7 1993 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0
b92a66a6
MN
1994
1995#ifdef CONFIG_PPC_DENORMALISATION
1996 mfspr r10,SPRN_HSRR1
afcf0095 1997 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
afcf0095
NP
1998 bne+ denorm_assist
1999#endif
1e9b4507 2000
a7c1ca19 2001 KVMTEST EXC_HV 0x1500
2d046308 2002 EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1
1a6822d1 2003EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
a74599a5 2004
d7e89849 2005#ifdef CONFIG_PPC_DENORMALISATION
1a6822d1 2006EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
d7e89849 2007 b exc_real_0x1500_denorm_exception_hv
1a6822d1 2008EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
d7e89849 2009#else
1a6822d1 2010EXC_VIRT_NONE(0x5500, 0x100)
afcf0095
NP
2011#endif
2012
4bb3c7a0 2013TRAMP_KVM_HV(PACA_EXGEN, 0x1500)
b01c8b54 2014
b92a66a6 2015#ifdef CONFIG_PPC_DENORMALISATION
da2bc464 2016TRAMP_REAL_BEGIN(denorm_assist)
b92a66a6
MN
2017BEGIN_FTR_SECTION
2018/*
2019 * To denormalise we need to move a copy of the register to itself.
2020 * For POWER6 do that here for all FP regs.
2021 */
2022 mfmsr r10
2023 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
2024 xori r10,r10,(MSR_FE0|MSR_FE1)
2025 mtmsrd r10
2026 sync
d7c67fb1 2027
f3c8b6c6
NP
2028 .Lreg=0
2029 .rept 32
2030 fmr .Lreg,.Lreg
2031 .Lreg=.Lreg+1
2032 .endr
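	/*
	 * The .rept block above expands at assembly time to
	 * "fmr 0,0; fmr 1,1; ... fmr 31,31", touching every FP register.
	 */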
d7c67fb1 2033
b92a66a6
MN
2034FTR_SECTION_ELSE
2035/*
2036 * To denormalise we need to move a copy of the register to itself.
2037 * For POWER7 do that here for the first 32 VSX registers only.
2038 */
2039 mfmsr r10
2040 oris r10,r10,MSR_VSX@h
2041 mtmsrd r10
2042 sync
d7c67fb1 2043
f3c8b6c6
NP
2044 .Lreg=0
2045 .rept 32
2046 XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2047 .Lreg=.Lreg+1
2048 .endr
d7c67fb1 2049
b92a66a6 2050ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
fb0fce3e
MN
2051
2052BEGIN_FTR_SECTION
2053 b denorm_done
2054END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
2055/*
2056 * To denormalise we need to move a copy of the register to itself.
 2057 * For POWER8 we need to do that for all 64 VSX registers.
2058 */
f3c8b6c6
NP
2059 .Lreg=32
2060 .rept 32
2061 XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2062 .Lreg=.Lreg+1
2063 .endr
2064
fb0fce3e 2065denorm_done:
f14040bc
MN
2066 mfspr r11,SPRN_HSRR0
2067 subi r11,r11,4
b92a66a6
MN
2068 mtspr SPRN_HSRR0,r11
2069 mtcrf 0x80,r9
2070 ld r9,PACA_EXGEN+EX_R9(r13)
44e9309f 2071 RESTORE_PPR_PACA(PACA_EXGEN, r10)
630573c1
PM
2072BEGIN_FTR_SECTION
2073 ld r10,PACA_EXGEN+EX_CFAR(r13)
2074 mtspr SPRN_CFAR,r10
2075END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
b92a66a6
MN
2076 ld r10,PACA_EXGEN+EX_R10(r13)
2077 ld r11,PACA_EXGEN+EX_R11(r13)
2078 ld r12,PACA_EXGEN+EX_R12(r13)
2079 ld r13,PACA_EXGEN+EX_R13(r13)
222f20f1 2080 HRFI_TO_UNKNOWN
b92a66a6
MN
2081 b .
2082#endif
2083
872e2ae4 2084EXC_COMMON(denorm_common, 0x1500, unknown_exception)
d7e89849
NP
2085
2086
2087#ifdef CONFIG_CBE_RAS
1a6822d1
NP
2088EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
2089EXC_VIRT_NONE(0x5600, 0x100)
d7e89849 2090TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
69a79344 2091EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
d7e89849 2092#else /* CONFIG_CBE_RAS */
1a6822d1
NP
2093EXC_REAL_NONE(0x1600, 0x100)
2094EXC_VIRT_NONE(0x5600, 0x100)
d7e89849
NP
2095#endif
2096
69a79344 2097
1a6822d1
NP
2098EXC_REAL(altivec_assist, 0x1700, 0x100)
2099EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
d7e89849 2100TRAMP_KVM(PACA_EXGEN, 0x1700)
b51c079e
NP
2101#ifdef CONFIG_ALTIVEC
2102EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
2103#else
2104EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
2105#endif
2106
d7e89849
NP
2107
2108#ifdef CONFIG_CBE_RAS
1a6822d1
NP
2109EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
2110EXC_VIRT_NONE(0x5800, 0x100)
d7e89849 2111TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
3965f8ab 2112EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
d7e89849 2113#else /* CONFIG_CBE_RAS */
1a6822d1
NP
2114EXC_REAL_NONE(0x1800, 0x100)
2115EXC_VIRT_NONE(0x5800, 0x100)
d7e89849
NP
2116#endif
2117
75eb767e 2118#ifdef CONFIG_PPC_WATCHDOG
2104180a
NP
2119
2120#define MASKED_DEC_HANDLER_LABEL 3f
2121
2122#define MASKED_DEC_HANDLER(_H) \
21233: /* soft-nmi */ \
2124 std r12,PACA_EXGEN+EX_R12(r13); \
2125 GET_SCRATCH0(r10); \
2126 std r10,PACA_EXGEN+EX_R13(r13); \
2d046308 2127 EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1
2104180a 2128
cc491f1d
NP
2129/*
2130 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
2131 * stack is one that is usable by maskable interrupts so long as MSR_EE
2132 * remains off. It is used for recovery when something has corrupted the
2133 * normal kernel stack, for example. The "soft NMI" must not use the process
2134 * stack because we want irq disabled sections to avoid touching the stack
2135 * at all (other than PMU interrupts), so use the emergency stack for this,
2136 * and run it entirely with interrupts hard disabled.
2137 */
2104180a
NP
2138EXC_COMMON_BEGIN(soft_nmi_common)
2139 mr r10,r1
2140 ld r1,PACAEMERGSP(r13)
2104180a 2141 subi r1,r1,INT_FRAME_SIZE
47169fba
NP
2142 EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900)
2143 bl save_nvgprs
2144 RECONCILE_IRQ_STATE(r10, r11)
c06075f3
NP
2145 addi r3,r1,STACK_FRAME_OVERHEAD
2146 bl soft_nmi_interrupt
2104180a
NP
2147 b ret_from_except
2148
75eb767e 2149#else /* CONFIG_PPC_WATCHDOG */
2104180a
NP
2150#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
2151#define MASKED_DEC_HANDLER(_H)
75eb767e 2152#endif /* CONFIG_PPC_WATCHDOG */
d7e89849 2153
0ebc4cda 2154/*
fe9e1d54
IM
2155 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 2156 * - If it was a decrementer interrupt, we bump the dec to max and return.
2157 * - If it was a doorbell we return immediately since doorbells are edge
2158 * triggered and won't automatically refire.
0869b6fd
MS
 2159 * - If it was an HMI we return immediately since we handled it in realmode
2160 * and it won't refire.
6cc3f91b 2161 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
fe9e1d54 2162 * This is called with r10 containing the value to OR to the paca field.
0ebc4cda 2163 */
4508a74a
NP
2164.macro MASKED_INTERRUPT hsrr
2165 .if \hsrr
2166masked_Hinterrupt:
2167 .else
2168masked_interrupt:
2169 .endif
2170 std r11,PACA_EXGEN+EX_R11(r13)
2171 lbz r11,PACAIRQHAPPENED(r13)
2172 or r11,r11,r10
2173 stb r11,PACAIRQHAPPENED(r13)
2174 cmpwi r10,PACA_IRQ_DEC
2175 bne 1f
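	/* Decrementer: reload DEC with the maximum positive value
	 * (0x7fffffff) so it does not keep refiring while soft-disabled. */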
2176 lis r10,0x7fff
2177 ori r10,r10,0xffff
2178 mtspr SPRN_DEC,r10
2179 b MASKED_DEC_HANDLER_LABEL
21801: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
2181 beq 2f
2182 .if \hsrr
2183 mfspr r10,SPRN_HSRR1
2184 xori r10,r10,MSR_EE /* clear MSR_EE */
2185 mtspr SPRN_HSRR1,r10
2186 .else
2187 mfspr r10,SPRN_SRR1
2188 xori r10,r10,MSR_EE /* clear MSR_EE */
2189 mtspr SPRN_SRR1,r10
2190 .endif
2191 ori r11,r11,PACA_IRQ_HARD_DIS
2192 stb r11,PACAIRQHAPPENED(r13)
21932: /* done */
2194 mtcrf 0x80,r9
2195 std r1,PACAR1(r13)
2196 ld r9,PACA_EXGEN+EX_R9(r13)
2197 ld r10,PACA_EXGEN+EX_R10(r13)
2198 ld r11,PACA_EXGEN+EX_R11(r13)
2199 /* returns to kernel where r13 must be set up, so don't restore it */
2200 .if \hsrr
2201 HRFI_TO_KERNEL
2202 .else
2203 RFI_TO_KERNEL
2204 .endif
2205 b .
2206 MASKED_DEC_HANDLER(\hsrr\())
2207.endm
57f26649 2208
a048a07d
NP
2209TRAMP_REAL_BEGIN(stf_barrier_fallback)
2210 std r9,PACA_EXRFI+EX_R9(r13)
2211 std r10,PACA_EXRFI+EX_R10(r13)
2212 sync
2213 ld r9,PACA_EXRFI+EX_R9(r13)
2214 ld r10,PACA_EXRFI+EX_R10(r13)
2215 ori 31,31,0
2216 .rept 14
2217 b 1f
22181:
2219 .endr
2220 blr
2221
aa8a5e00
ME
2222TRAMP_REAL_BEGIN(rfi_flush_fallback)
2223 SET_SCRATCH0(r13);
2224 GET_PACA(r13);
78ee9946
ME
2225 std r1,PACA_EXRFI+EX_R12(r13)
2226 ld r1,PACAKSAVE(r13)
aa8a5e00
ME
2227 std r9,PACA_EXRFI+EX_R9(r13)
2228 std r10,PACA_EXRFI+EX_R10(r13)
2229 std r11,PACA_EXRFI+EX_R11(r13)
aa8a5e00
ME
2230 mfctr r9
2231 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
bdcb1aef
NP
2232 ld r11,PACA_L1D_FLUSH_SIZE(r13)
2233 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
aa8a5e00 2234 mtctr r11
15a3204d 2235 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
aa8a5e00
ME
2236
2237 /* order ld/st prior to dcbt stop all streams with flushing */
2238 sync
bdcb1aef
NP
2239
2240 /*
 2241 * The load addresses are at staggered offsets within cachelines,
2242 * which suits some pipelines better (on others it should not
2243 * hurt).
2244 */
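	/*
	 * Each pass of the loop below touches 8 cache lines 0x80 bytes apart
	 * (0x400 bytes per pass), and ctr was set to flush_size / (128 * 8)
	 * above, so the whole fallback area is walked, displacing the L1D.
	 */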
22451:
2246 ld r11,(0x80 + 8)*0(r10)
2247 ld r11,(0x80 + 8)*1(r10)
2248 ld r11,(0x80 + 8)*2(r10)
2249 ld r11,(0x80 + 8)*3(r10)
2250 ld r11,(0x80 + 8)*4(r10)
2251 ld r11,(0x80 + 8)*5(r10)
2252 ld r11,(0x80 + 8)*6(r10)
2253 ld r11,(0x80 + 8)*7(r10)
2254 addi r10,r10,0x80*8
aa8a5e00
ME
2255 bdnz 1b
2256
2257 mtctr r9
2258 ld r9,PACA_EXRFI+EX_R9(r13)
2259 ld r10,PACA_EXRFI+EX_R10(r13)
2260 ld r11,PACA_EXRFI+EX_R11(r13)
78ee9946 2261 ld r1,PACA_EXRFI+EX_R12(r13)
aa8a5e00
ME
2262 GET_SCRATCH0(r13);
2263 rfid
2264
2265TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2266 SET_SCRATCH0(r13);
2267 GET_PACA(r13);
78ee9946
ME
2268 std r1,PACA_EXRFI+EX_R12(r13)
2269 ld r1,PACAKSAVE(r13)
aa8a5e00
ME
2270 std r9,PACA_EXRFI+EX_R9(r13)
2271 std r10,PACA_EXRFI+EX_R10(r13)
2272 std r11,PACA_EXRFI+EX_R11(r13)
aa8a5e00
ME
2273 mfctr r9
2274 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
bdcb1aef
NP
2275 ld r11,PACA_L1D_FLUSH_SIZE(r13)
2276 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
aa8a5e00 2277 mtctr r11
15a3204d 2278 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
aa8a5e00
ME
2279
2280 /* order ld/st prior to dcbt stop all streams with flushing */
2281 sync
bdcb1aef
NP
2282
2283 /*
 2284 * The load addresses are at staggered offsets within cachelines,
2285 * which suits some pipelines better (on others it should not
2286 * hurt).
2287 */
22881:
2289 ld r11,(0x80 + 8)*0(r10)
2290 ld r11,(0x80 + 8)*1(r10)
2291 ld r11,(0x80 + 8)*2(r10)
2292 ld r11,(0x80 + 8)*3(r10)
2293 ld r11,(0x80 + 8)*4(r10)
2294 ld r11,(0x80 + 8)*5(r10)
2295 ld r11,(0x80 + 8)*6(r10)
2296 ld r11,(0x80 + 8)*7(r10)
2297 addi r10,r10,0x80*8
aa8a5e00
ME
2298 bdnz 1b
2299
2300 mtctr r9
2301 ld r9,PACA_EXRFI+EX_R9(r13)
2302 ld r10,PACA_EXRFI+EX_R10(r13)
2303 ld r11,PACA_EXRFI+EX_R11(r13)
78ee9946 2304 ld r1,PACA_EXRFI+EX_R12(r13)
aa8a5e00
ME
2305 GET_SCRATCH0(r13);
2306 hrfid
2307
57f26649
NP
2308/*
2309 * Real mode exceptions actually use this too, but alternate
2310 * instruction code patches (which end up in the common .text area)
2311 * cannot reach these if they are put there.
2312 */
2313USE_FIXED_SECTION(virt_trampolines)
4508a74a
NP
2314 MASKED_INTERRUPT EXC_STD
2315 MASKED_INTERRUPT EXC_HV
0ebc4cda 2316
4f6c11db 2317#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
da2bc464 2318TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
4f6c11db
PM
2319 /*
2320 * Here all GPRs are unchanged from when the interrupt happened
2321 * except for r13, which is saved in SPRG_SCRATCH0.
2322 */
2323 mfspr r13, SPRN_SRR0
2324 addi r13, r13, 4
2325 mtspr SPRN_SRR0, r13
2326 GET_SCRATCH0(r13)
222f20f1 2327 RFI_TO_KERNEL
4f6c11db
PM
2328 b .
2329
da2bc464 2330TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
4f6c11db
PM
2331 /*
2332 * Here all GPRs are unchanged from when the interrupt happened
2333 * except for r13, which is saved in SPRG_SCRATCH0.
2334 */
2335 mfspr r13, SPRN_HSRR0
2336 addi r13, r13, 4
2337 mtspr SPRN_HSRR0, r13
2338 GET_SCRATCH0(r13)
222f20f1 2339 HRFI_TO_KERNEL
4f6c11db
PM
2340 b .
2341#endif
2342
0ebc4cda 2343/*
057b6d7e
HB
2344 * Ensure that any handlers that get invoked from the exception prologs
2345 * above are below the first 64KB (0x10000) of the kernel image because
2346 * the prologs assemble the addresses of these handlers using the
2347 * LOAD_HANDLER macro, which uses an ori instruction.
0ebc4cda
BH
2348 */
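/*
 * (The 64KB limit comes from ori taking only a 16-bit unsigned immediate.)
 */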
2349
2350/*** Common interrupt handlers ***/
2351
0ebc4cda 2352
c1fb6816
MN
2353 /*
2354 * Relocation-on interrupts: A subset of the interrupts can be delivered
2355 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
2356 * it. Addresses are the same as the original interrupt addresses, but
2357 * offset by 0xc000000000004000.
2358 * It's impossible to receive interrupts below 0x300 via this mechanism.
 2359 * KVM: None of these traps are from the guest; anything that escalated
2360 * to HV=1 from HV=0 is delivered via real mode handlers.
2361 */
2362
2363 /*
2364 * This uses the standard macro, since the original 0x300 vector
2365 * only has extra guff for STAB-based processors -- which never
2366 * come here.
2367 */
da2bc464 2368
57f26649 2369EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
b1576fec 2370 b __ppc64_runlatch_on
fe1952fc 2371
57f26649 2372USE_FIXED_SECTION(virt_trampolines)
8ed8ab40
HB
2373 /*
2374 * The __end_interrupts marker must be past the out-of-line (OOL)
2375 * handlers, so that they are copied to real address 0x100 when running
2376 * a relocatable kernel. This ensures they can be reached from the short
2377 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
2378 * directly, without using LOAD_HANDLER().
2379 */
2380 .align 7
2381 .globl __end_interrupts
2382__end_interrupts:
57f26649 2383DEFINE_FIXED_SYMBOL(__end_interrupts)
61383407 2384
087aa036 2385#ifdef CONFIG_PPC_970_NAP
7c8cb4b5 2386EXC_COMMON_BEGIN(power4_fixup_nap)
087aa036
CG
2387 andc r9,r9,r10
2388 std r9,TI_LOCAL_FLAGS(r11)
2389 ld r10,_LINK(r1) /* make idle task do the */
2390 std r10,_NIP(r1) /* equivalent of a blr */
2391 blr
2392#endif
2393
57f26649
NP
2394CLOSE_FIXED_SECTION(real_vectors);
2395CLOSE_FIXED_SECTION(real_trampolines);
2396CLOSE_FIXED_SECTION(virt_vectors);
2397CLOSE_FIXED_SECTION(virt_trampolines);
2398
2399USE_TEXT_SECTION()
2400
0ebc4cda
BH
2401/*
2402 * Hash table stuff
2403 */
f4329f2e 2404 .balign IFETCH_ALIGN_BYTES
6a3bab90 2405do_hash_page:
4e003747 2406#ifdef CONFIG_PPC_BOOK3S_64
e6c2a479 2407 lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
398a719d
BH
2408 ori r0,r0,DSISR_BAD_FAULT_64S@l
2409 and. r0,r4,r0 /* weird error? */
0ebc4cda 2410 bne- handle_page_fault /* if not, try to insert a HPTE */
c911d2e1 2411 ld r11, PACA_THREAD_INFO(r13)
9c1e1052
PM
2412 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
2413 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
2414 bne 77f /* then don't call hash_page now */
0ebc4cda
BH
2415
2416 /*
2417 * r3 contains the faulting address
106713a1 2418 * r4 msr
0ebc4cda 2419 * r5 contains the trap number
aefa5688 2420 * r6 contains dsisr
0ebc4cda 2421 *
7230c564 2422 * at return r3 = 0 for success, 1 for page fault, negative for error
0ebc4cda 2423 */
106713a1 2424 mr r4,r12
aefa5688 2425 ld r6,_DSISR(r1)
106713a1
AK
2426 bl __hash_page /* build HPTE if possible */
2427 cmpdi r3,0 /* see if __hash_page succeeded */
0ebc4cda 2428
7230c564 2429 /* Success */
0ebc4cda 2430 beq fast_exc_return_irq /* Return from exception on success */
0ebc4cda 2431
7230c564
BH
2432 /* Error */
2433 blt- 13f
d89ba535
NR
2434
2435 /* Reload DSISR into r4 for the DABR check below */
2436 ld r4,_DSISR(r1)
4e003747 2437#endif /* CONFIG_PPC_BOOK3S_64 */
9c7cc234 2438
0ebc4cda
BH
2439/* Here we have a page fault that hash_page can't handle. */
2440handle_page_fault:
d89ba535
NR
244111: andis. r0,r4,DSISR_DABRMATCH@h
2442 bne- handle_dabr_fault
2443 ld r4,_DAR(r1)
0ebc4cda
BH
2444 ld r5,_DSISR(r1)
2445 addi r3,r1,STACK_FRAME_OVERHEAD
b1576fec 2446 bl do_page_fault
0ebc4cda 2447 cmpdi r3,0
f474c28f 2448 beq+ ret_from_except_lite
b1576fec 2449 bl save_nvgprs
0ebc4cda
BH
2450 mr r5,r3
2451 addi r3,r1,STACK_FRAME_OVERHEAD
2452 lwz r4,_DAR(r1)
b1576fec
AB
2453 bl bad_page_fault
2454 b ret_from_except
0ebc4cda 2455
a546498f
BH
2456/* We have a data breakpoint exception - handle it */
2457handle_dabr_fault:
b1576fec 2458 bl save_nvgprs
a546498f
BH
2459 ld r4,_DAR(r1)
2460 ld r5,_DSISR(r1)
2461 addi r3,r1,STACK_FRAME_OVERHEAD
b1576fec 2462 bl do_break
f474c28f
RB
2463 /*
2464 * do_break() may have changed the NV GPRS while handling a breakpoint.
2465 * If so, we need to restore them with their updated values. Don't use
2466 * ret_from_except_lite here.
2467 */
2468 b ret_from_except
a546498f 2469
0ebc4cda 2470
4e003747 2471#ifdef CONFIG_PPC_BOOK3S_64
0ebc4cda
BH
2472/* We have a page fault that hash_page could handle but HV refused
2473 * the PTE insertion
2474 */
b1576fec 247513: bl save_nvgprs
0ebc4cda
BH
2476 mr r5,r3
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478 ld r4,_DAR(r1)
b1576fec
AB
2479 bl low_hash_fault
2480 b ret_from_except
caca285e 2481#endif
0ebc4cda 2482
9c1e1052
PM
2483/*
2484 * We come here as a result of a DSI at a point where we don't want
2485 * to call hash_page, such as when we are accessing memory (possibly
2486 * user memory) inside a PMU interrupt that occurred while interrupts
2487 * were soft-disabled. We want to invoke the exception handler for
2488 * the access, or panic if there isn't a handler.
2489 */
b1576fec 249077: bl save_nvgprs
9c1e1052
PM
2491 mr r4,r3
2492 addi r3,r1,STACK_FRAME_OVERHEAD
2493 li r5,SIGSEGV
b1576fec
AB
2494 bl bad_page_fault
2495 b ret_from_except
4e2bf01b
ME
2496
2497/*
2498 * Here we have detected that the kernel stack pointer is bad.
2499 * R9 contains the saved CR, r13 points to the paca,
2500 * r10 contains the (bad) kernel stack pointer,
2501 * r11 and r12 contain the saved SRR0 and SRR1.
2502 * We switch to using an emergency stack, save the registers there,
2503 * and call kernel_bad_stack(), which panics.
2504 */
2505bad_stack:
2506 ld r1,PACAEMERGSP(r13)
2507 subi r1,r1,64+INT_FRAME_SIZE
2508 std r9,_CCR(r1)
2509 std r10,GPR1(r1)
2510 std r11,_NIP(r1)
2511 std r12,_MSR(r1)
2512 mfspr r11,SPRN_DAR
2513 mfspr r12,SPRN_DSISR
2514 std r11,_DAR(r1)
2515 std r12,_DSISR(r1)
2516 mflr r10
2517 mfctr r11
2518 mfxer r12
2519 std r10,_LINK(r1)
2520 std r11,_CTR(r1)
2521 std r12,_XER(r1)
2522 SAVE_GPR(0,r1)
2523 SAVE_GPR(2,r1)
2524 ld r10,EX_R3(r3)
2525 std r10,GPR3(r1)
2526 SAVE_GPR(4,r1)
2527 SAVE_4GPRS(5,r1)
2528 ld r9,EX_R9(r3)
2529 ld r10,EX_R10(r3)
2530 SAVE_2GPRS(9,r1)
2531 ld r9,EX_R11(r3)
2532 ld r10,EX_R12(r3)
2533 ld r11,EX_R13(r3)
2534 std r9,GPR11(r1)
2535 std r10,GPR12(r1)
2536 std r11,GPR13(r1)
2537BEGIN_FTR_SECTION
2538 ld r10,EX_CFAR(r3)
2539 std r10,ORIG_GPR3(r1)
2540END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
2541 SAVE_8GPRS(14,r1)
2542 SAVE_10GPRS(22,r1)
2543 lhz r12,PACA_TRAP_SAVE(r13)
2544 std r12,_TRAP(r1)
2545 addi r11,r1,INT_FRAME_SIZE
2546 std r11,0(r1)
2547 li r12,0
2548 std r12,0(r11)
2549 ld r2,PACATOC(r13)
2550 ld r11,exception_marker@toc(r2)
2551 std r12,RESULT(r1)
2552 std r11,STACK_FRAME_OVERHEAD-16(r1)
25531: addi r3,r1,STACK_FRAME_OVERHEAD
2554 bl kernel_bad_stack
2555 b 1b
15770a13 2556_ASM_NOKPROBE_SYMBOL(bad_stack);
0f0c6ca1 2557
a9af97aa
NP
2558/*
2559 * When doorbell is triggered from system reset wakeup, the message is
2560 * not cleared, so it would fire again when EE is enabled.
2561 *
2562 * When coming from local_irq_enable, there may be the same problem if
2563 * we were hard disabled.
2564 *
2565 * Execute msgclr to clear pending exceptions before handling it.
2566 */
2567h_doorbell_common_msgclr:
2568 LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
2569 PPC_MSGCLR(3)
2570 b h_doorbell_common
2571
2572doorbell_super_common_msgclr:
2573 LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
2574 PPC_MSGCLRP(3)
2575 b doorbell_super_common
2576
0f0c6ca1
NP
2577/*
2578 * Called from arch_local_irq_enable when an interrupt needs
2579 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
2580 * which kind of interrupt. MSR:EE is already off. We generate a
 2581 * stackframe as if a real interrupt had happened.
2582 *
2583 * Note: While MSR:EE is off, we need to make sure that _MSR
2584 * in the generated frame has EE set to 1 or the exception
 2585 * handler will not properly re-enable interrupts.
b48bbb82
NP
2586 *
2587 * Note that we don't specify LR as the NIP (return address) for
2588 * the interrupt because that would unbalance the return branch
2589 * predictor.
0f0c6ca1
NP
2590 */
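/*
 * As a rough sketch only (the real caller lives in the irq code, not here),
 * replaying a lost decrementer interrupt amounts to calling this with
 * r3 = 0x900 and MSR:EE off, i.e. roughly:
 *
 *	li	r3,0x900
 *	bl	__replay_interrupt
 */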
2591_GLOBAL(__replay_interrupt)
2592 /* We are going to jump to the exception common code which
2593 * will retrieve various register values from the PACA which
2594 * we don't give a damn about, so we don't bother storing them.
2595 */
2596 mfmsr r12
3e23a12b 2597 LOAD_REG_ADDR(r11, replay_interrupt_return)
0f0c6ca1
NP
2598 mfcr r9
2599 ori r12,r12,MSR_EE
2600 cmpwi r3,0x900
2601 beq decrementer_common
2602 cmpwi r3,0x500
e6c1203d
NP
2603BEGIN_FTR_SECTION
2604 beq h_virt_irq_common
2605FTR_SECTION_ELSE
0f0c6ca1 2606 beq hardware_interrupt_common
e6c1203d 2607ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
f442d004
MS
2608 cmpwi r3,0xf00
2609 beq performance_monitor_common
0f0c6ca1 2610BEGIN_FTR_SECTION
d6f73fc6 2611 cmpwi r3,0xa00
a9af97aa 2612 beq h_doorbell_common_msgclr
0f0c6ca1
NP
2613 cmpwi r3,0xe60
2614 beq hmi_exception_common
2615FTR_SECTION_ELSE
2616 cmpwi r3,0xa00
a9af97aa 2617 beq doorbell_super_common_msgclr
0f0c6ca1 2618ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
3e23a12b 2619replay_interrupt_return:
0f0c6ca1 2620 blr
b48bbb82 2621
15770a13 2622_ASM_NOKPROBE_SYMBOL(__replay_interrupt)