powerpc/64s/exception: move head-64.h code to exception-64s.S where it is used
[linux-2.6-block.git] / arch / powerpc / kernel / exceptions-64s.S
CommitLineData
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
0ebc4cda
BH
2/*
3 * This file contains the 64-bit "server" PowerPC variant
4 * of the low level exception handling including exception
5 * vectors, exception return, part of the slb and stab
6 * handling and other fixed offset specific things.
7 *
8 * This file is meant to be #included from head_64.S due to
25985edc 9 * position dependent assembly.
0ebc4cda
BH
10 *
11 * Most of this originates from head_64.S and thus has the same
12 * copyright history.
13 *
14 */
15
7230c564 16#include <asm/hw_irq.h>
8aa34ab8 17#include <asm/exception-64s.h>
46f52210 18#include <asm/ptrace.h>
7cba160a 19#include <asm/cpuidle.h>
da2bc464 20#include <asm/head-64.h>
2c86cd18 21#include <asm/feature-fixups.h>
890274c2 22#include <asm/kup.h>
8aa34ab8 23
12a04809
NP
24/*
25 * We're short on space and time in the exception prolog, so we can't
26 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
27 * Instead we get the base of the kernel from paca->kernelbase and or in the low
28 * part of label. This requires that the label be within 64KB of kernelbase, and
29 * that kernelbase be 64K aligned.
30 */
31#define LOAD_HANDLER(reg, label) \
32 ld reg,PACAKBASE(r13); /* get high part of &label */ \
33 ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
34
/* As LOAD_HANDLER, but ORs in the low 16 bits of the label's absolute address. */
35#define __LOAD_HANDLER(reg, label) \
36 ld reg,PACAKBASE(r13); \
37 ori reg,reg,(ABS_ADDR(label))@l
38
39/*
40 * Branches from unrelocated code (e.g., interrupts) to labels outside
41 * head-y require >64K offsets.
 *
 * The extra addis of the @h half lifts the 64KB-of-kernelbase restriction
 * of the plain handlers above.
42 */
43#define __LOAD_FAR_HANDLER(reg, label) \
44 ld reg,PACAKBASE(r13); \
45 ori reg,reg,(ABS_ADDR(label))@l; \
46 addis reg,reg,(ABS_ADDR(label))@h
47
48/* Exception register prefixes */
/* Passed as the 'hsrr' argument of the prolog macros: 1 = use HSRR0/1, 0 = SRR0/1. */
49#define EXC_HV 1
50#define EXC_STD 0
51
52#if defined(CONFIG_RELOCATABLE)
53/*
54 * If we support interrupts with relocation on AND we're a relocatable kernel,
55 * we need to use CTR to get to the 2nd level handler. So, save/restore it
56 * when required.
57 */
58#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
59#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
60#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
61#else
62/* ...else CTR is unused and in register. */
63#define SAVE_CTR(reg, area)
64#define GET_CTR(reg, area) mfctr reg
65#define RESTORE_CTR(reg, area)
66#endif
67
68/*
69 * PPR save/restore macros used in exceptions-64s.S
70 * Used for P7 or later processors
71 */
72#define SAVE_PPR(area, ra) \
73BEGIN_FTR_SECTION_NESTED(940) \
74 ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
75 std ra,_PPR(r1); \
76END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
77
78#define RESTORE_PPR_PACA(area, ra) \
79BEGIN_FTR_SECTION_NESTED(941) \
80 ld ra,area+EX_PPR(r13); \
81 mtspr SPRN_PPR,ra; \
82END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
83
84/*
85 * Get an SPR into a register if the CPU has the given feature
 *
 * NOTE(review): the three OPT_* helpers below all reuse nested-section
 * label 943 — fine so long as no two of them are ever nested inside the
 * same feature section; confirm if that ever changes.
86 */
87#define OPT_GET_SPR(ra, spr, ftr) \
88BEGIN_FTR_SECTION_NESTED(943) \
89 mfspr ra,spr; \
90END_FTR_SECTION_NESTED(ftr,ftr,943)
91
92/*
93 * Set an SPR from a register if the CPU has the given feature
94 */
95#define OPT_SET_SPR(ra, spr, ftr) \
96BEGIN_FTR_SECTION_NESTED(943) \
97 mtspr spr,ra; \
98END_FTR_SECTION_NESTED(ftr,ftr,943)
99
100/*
101 * Save a register to the PACA if the CPU has the given feature
102 */
103#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
104BEGIN_FTR_SECTION_NESTED(943) \
105 std ra,offset(r13); \
106END_FTR_SECTION_NESTED(ftr,ftr,943)
107
/*
 * EXCEPTION_PROLOG_0: first instructions of a vector.  The caller has
 * already stashed r13 in the scratch SPR (SET_SCRATCH0); grab the paca
 * into r13, save r9/r10 into the given paca save \area, and read
 * PPR/CFAR early into r9/r10 (if the CPU has them) so PROLOG_1 can
 * store them before they are clobbered.
 */
108.macro EXCEPTION_PROLOG_0 area
109 GET_PACA(r13)
110 std r9,\area\()+EX_R9(r13) /* save r9 */
111 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
112 HMT_MEDIUM
113 std r10,\area\()+EX_R10(r13) /* save r10 - r12 */
114 OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
115.endm
116
/*
 * EXCEPTION_PROLOG_1: finish filling the paca save \area (PPR, CFAR,
 * CTR, r11-r13) and read CR into r9.  If \kvm, test for guest entry.
 * If \bitmask is non-zero this is a maskable interrupt: when it is
 * soft-masked, record the reason in r10 (a PACA_IRQ_* constant chosen
 * from \vec) and divert to masked_interrupt/masked_Hinterrupt.
 */
117.macro EXCEPTION_PROLOG_1 hsrr, area, kvm, vec, bitmask
118 OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
119 OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
120 INTERRUPT_TO_KERNEL
121 SAVE_CTR(r10, \area\())
122 mfcr r9
123 .if \kvm
124 KVMTEST \hsrr \vec
125 .endif
126 .if \bitmask
127 lbz r10,PACAIRQSOFTMASK(r13)
128 andi. r10,r10,\bitmask
129 /* Associate vector numbers with bits in paca->irq_happened */
130 .if \vec == 0x500 || \vec == 0xea0
131 li r10,PACA_IRQ_EE
132 .elseif \vec == 0x900
133 li r10,PACA_IRQ_DEC
134 .elseif \vec == 0xa00 || \vec == 0xe80
135 li r10,PACA_IRQ_DBELL
136 .elseif \vec == 0xe60
137 li r10,PACA_IRQ_HMI
138 .elseif \vec == 0xf00
139 li r10,PACA_IRQ_PMI
140 .else
141 .abort "Bad maskable vector"
142 .endif
143
144 .if \hsrr
145 bne masked_Hinterrupt
146 .else
147 bne masked_interrupt
148 .endif
149 .endif
150
151 std r11,\area\()+EX_R11(r13)
152 std r12,\area\()+EX_R12(r13)
153 GET_SCRATCH0(r10)
154 std r10,\area\()+EX_R13(r13)
155.endm
156
/*
 * EXCEPTION_PROLOG_2_REAL: save [H]SRR0 in r11 and [H]SRR1 in r12,
 * then [h]rfid to \label with the kernel MSR.  \label is loaded with
 * LOAD_HANDLER, so it must be within 64K of kernelbase.  When \set_ri
 * is 0, MSR_RI is cleared in the target MSR (used when the save
 * area/stack could be corrupted by a nested exception, e.g. NMI).
 */
157.macro EXCEPTION_PROLOG_2_REAL label, hsrr, set_ri
158 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
159 .if ! \set_ri
160 xori r10,r10,MSR_RI /* Clear MSR_RI */
161 .endif
162 .if \hsrr
163 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
164 .else
165 mfspr r11,SPRN_SRR0 /* save SRR0 */
166 .endif
167 LOAD_HANDLER(r12, \label\())
168 .if \hsrr
169 mtspr SPRN_HSRR0,r12
170 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
171 mtspr SPRN_HSRR1,r10
172 HRFI_TO_KERNEL
173 .else
174 mtspr SPRN_SRR0,r12
175 mfspr r12,SPRN_SRR1 /* and SRR1 */
176 mtspr SPRN_SRR1,r10
177 RFI_TO_KERNEL
178 .endif
179 b . /* prevent speculative execution */
180.endm
181
/*
 * EXCEPTION_PROLOG_2_VIRT: branch (relocation stays on) to \label with
 * [H]SRR0 in r11 and [H]SRR1 in r12, setting MSR_RI (EE stays off).
 * Relocatable kernels must go via CTR since \label may be out of
 * direct-branch range.
 */
182.macro EXCEPTION_PROLOG_2_VIRT label, hsrr
183#ifdef CONFIG_RELOCATABLE
184 .if \hsrr
185 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
186 .else
187 mfspr r11,SPRN_SRR0 /* save SRR0 */
188 .endif
189 LOAD_HANDLER(r12, \label\())
190 mtctr r12
191 .if \hsrr
192 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
193 .else
194 mfspr r12,SPRN_SRR1 /* and SRR1 */
195 .endif
196 li r10,MSR_RI
197 mtmsrd r10,1 /* Set RI (EE=0) */
198 bctr
199#else
200 .if \hsrr
201 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
202 mfspr r12,SPRN_HSRR1 /* and HSRR1 */
203 .else
204 mfspr r11,SPRN_SRR0 /* save SRR0 */
205 mfspr r12,SPRN_SRR1 /* and SRR1 */
206 .endif
207 li r10,MSR_RI
208 mtmsrd r10,1 /* Set RI (EE=0) */
209 b \label
210#endif
211.endm
212
213/*
214 * Branch to label using its 0xC000 address. This results in instruction
215 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
216 * on using mtmsr rather than rfid.
217 *
218 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
219 * load KBASE for a slight optimisation.
220 */
221#define BRANCH_TO_C000(reg, label) \
222 __LOAD_HANDLER(reg, label); \
223 mtctr reg; \
224 bctr
225
226#ifdef CONFIG_RELOCATABLE
/* Relocatable kernel: the target may be out of direct-branch range, go via CTR. */
227#define BRANCH_TO_COMMON(reg, label) \
228 __LOAD_HANDLER(reg, label); \
229 mtctr reg; \
230 bctr
231
232#define BRANCH_LINK_TO_FAR(label) \
233 __LOAD_FAR_HANDLER(r12, label); \
234 mtctr r12; \
235 bctrl
236
237#else
/* Non-relocatable kernel: plain (linking) branches reach the handlers directly. */
238#define BRANCH_TO_COMMON(reg, label) \
239 b label
240
241#define BRANCH_LINK_TO_FAR(label) \
242 bl label
243#endif
244
245#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
246
247#ifdef CONFIG_RELOCATABLE
248/*
249 * KVM requires __LOAD_FAR_HANDLER.
250 *
251 * __BRANCH_TO_KVM_EXIT branches are also a special case because they
252 * explicitly use r9 then reload it from PACA before branching. Hence
253 * the double-underscore.
254 */
255#define __BRANCH_TO_KVM_EXIT(area, label) \
256 mfctr r9; \
257 std r9,HSTATE_SCRATCH1(r13); \
258 __LOAD_FAR_HANDLER(r9, label); \
259 mtctr r9; \
260 ld r9,area+EX_R9(r13); \
261 bctr
262
263#else
264#define __BRANCH_TO_KVM_EXIT(area, label) \
265 ld r9,area+EX_R9(r13); \
266 b label
267#endif
268
269#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
270/*
271 * If hv is possible, interrupts come in to the hv version
272 * of the kvmppc_interrupt code, which then jumps to the PR handler,
273 * kvmppc_interrupt_pr, if the guest is a PR guest.
274 */
275#define kvmppc_interrupt kvmppc_interrupt_hv
276#else
277#define kvmppc_interrupt kvmppc_interrupt_pr
278#endif
279
/*
 * KVMTEST: if we interrupted a guest (HSTATE_IN_GUEST != 0), branch to
 * the per-vector do_kvm_[H]\n trampoline (emitted by TRAMP_KVM*).
 */
280.macro KVMTEST hsrr, n
281 lbz r10,HSTATE_IN_GUEST(r13)
282 cmpwi r10,0
283 .if \hsrr
284 bne do_kvm_H\n
285 .else
286 bne do_kvm_\n
287 .endif
288.endm
289
/*
 * KVM_HANDLER: stash guest CFAR/PPR into HSTATE, build the trap word
 * (CR in the top half, vector number in the bottom) in r12, and exit
 * to kvmppc_interrupt.  With \skip, a guest in KVM_GUEST_MODE_SKIP
 * instead restores r9/r10 and branches to the skip handler.
 */
290.macro KVM_HANDLER area, hsrr, n, skip
291 .if \skip
292 cmpwi r10,KVM_GUEST_MODE_SKIP
293 beq 89f
294 .else
295 BEGIN_FTR_SECTION_NESTED(947)
296 ld r10,\area+EX_CFAR(r13)
297 std r10,HSTATE_CFAR(r13)
298 END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
299 .endif
300
301 BEGIN_FTR_SECTION_NESTED(948)
302 ld r10,\area+EX_PPR(r13)
303 std r10,HSTATE_PPR(r13)
304 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
305 ld r10,\area+EX_R10(r13)
306 std r12,HSTATE_SCRATCH0(r13)
307 sldi r12,r9,32
308 /* HSRR variants have the 0x2 bit added to their trap number */
309 .if \hsrr
310 ori r12,r12,(\n + 0x2)
311 .else
312 ori r12,r12,(\n)
313 .endif
314 /* This reloads r9 before branching to kvmppc_interrupt */
315 __BRANCH_TO_KVM_EXIT(\area, kvmppc_interrupt)
316
317 .if \skip
31889: mtocrf 0x80,r9
319 ld r9,\area+EX_R9(r13)
320 ld r10,\area+EX_R10(r13)
321 .if \hsrr
322 b kvmppc_skip_Hinterrupt
323 .else
324 b kvmppc_skip_interrupt
325 .endif
326 .endif
327.endm
328
329#else
/* No KVM: the test and handler expand to nothing. */
330.macro KVMTEST hsrr, n
331.endm
332.macro KVM_HANDLER area, hsrr, n, skip
333.endm
334#endif
335
/* Start the pt_regs frame: CR, NIP, MSR, stack chain, r0, old r1 (in r10). */
336#define EXCEPTION_PROLOG_COMMON_1() \
337 std r9,_CCR(r1); /* save CR in stackframe */ \
338 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
339 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
340 std r10,0(r1); /* make stack chain pointer */ \
341 std r0,GPR0(r1); /* save r0 in stackframe */ \
342 std r10,GPR1(r1); /* save r1 in stackframe */ \
343
344
345/*
346 * The common exception prolog is used for all except a few exceptions
347 * such as a segment miss on a kernel address. We have to be prepared
348 * to take another exception from the point where we first touch the
349 * kernel stack onwards.
350 *
351 * On entry r13 points to the paca, r9-r13 are saved in the paca,
352 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
353 * SRR1, and relocation is on.
 *
 * A kernel-mode r1 that would underflow into userspace aborts to
 * bad_stack (trap number parked in PACA_TRAP_SAVE, r3 pointing at the
 * paca save area).
354 */
355#define EXCEPTION_PROLOG_COMMON(n, area) \
356 andi. r10,r12,MSR_PR; /* See if coming from user */ \
357 mr r10,r1; /* Save r1 */ \
358 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
359 beq- 1f; \
360 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
3611: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
362 blt+ cr1,3f; /* abort if it is */ \
363 li r1,(n); /* will be reloaded later */ \
364 sth r1,PACA_TRAP_SAVE(r13); \
365 std r3,area+EX_R3(r13); \
366 addi r3,r13,area; /* r3 -> where regs are saved*/ \
367 RESTORE_CTR(r1, area); \
368 b bad_stack; \
3693: EXCEPTION_PROLOG_COMMON_1(); \
370 kuap_save_amr_and_lock r9, r10, cr1, cr0; \
371 beq 4f; /* if from kernel mode */ \
372 ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
373 SAVE_PPR(area, r9); \
3744: EXCEPTION_PROLOG_COMMON_2(area) \
375 EXCEPTION_PROLOG_COMMON_3(n) \
376 ACCOUNT_STOLEN_TIME
377
378/* Save original regs values from save area to stack frame. */
379#define EXCEPTION_PROLOG_COMMON_2(area) \
380 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
381 ld r10,area+EX_R10(r13); \
382 std r9,GPR9(r1); \
383 std r10,GPR10(r1); \
384 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
385 ld r10,area+EX_R12(r13); \
386 ld r11,area+EX_R13(r13); \
387 std r9,GPR11(r1); \
388 std r10,GPR12(r1); \
389 std r11,GPR13(r1); \
390 BEGIN_FTR_SECTION_NESTED(66); \
391 ld r10,area+EX_CFAR(r13); \
392 std r10,ORIG_GPR3(r1); \
393 END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
394 GET_CTR(r10, area); \
395 std r10,_CTR(r1);
396
/* Finish the frame: r2-r8, LR, XER, soft-mask state, trap number, marker. */
397#define EXCEPTION_PROLOG_COMMON_3(n) \
398 std r2,GPR2(r1); /* save r2 in stackframe */ \
399 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
400 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
401 mflr r9; /* Get LR, later save to stack */ \
402 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
403 std r9,_LINK(r1); \
404 lbz r10,PACAIRQSOFTMASK(r13); \
405 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
406 std r10,SOFTE(r1); \
407 std r11,_XER(r1); \
408 li r9,(n)+1; /* trap number | 1 marks regs as incomplete */ \
409 std r9,_TRAP(r1); /* set trap number */ \
410 li r10,0; \
411 ld r11,exception_marker@toc(r2); \
412 std r10,RESULT(r1); /* clear regs->result */ \
413 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
414
/* Turn the run latch on if _TLF_RUNLATCH says it is currently off (CTRL-capable CPUs). */
415#define RUNLATCH_ON \
416BEGIN_FTR_SECTION \
417 ld r3, PACA_THREAD_INFO(r13); \
418 ld r4,TI_LOCAL_FLAGS(r3); \
419 andi. r0,r4,_TLF_RUNLATCH; \
420 beql ppc64_runlatch_on_trampoline; \
421END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
422
/*
 * NOTE(review): the trailing backslash after EXCEPTION_PROLOG_COMMON
 * pulls the following (blank) line into the macro.  Harmless today,
 * but fragile if anything is ever added directly below.
 */
423#define EXCEPTION_COMMON(area, trap) \
424 EXCEPTION_PROLOG_COMMON(trap, area); \
425
426/*
427 * Exception where stack is already set in r1, r1 is saved in r10
428 */
429#define EXCEPTION_COMMON_STACK(area, trap) \
430 EXCEPTION_PROLOG_COMMON_1(); \
431 kuap_save_amr_and_lock r9, r10, cr1; \
432 EXCEPTION_PROLOG_COMMON_2(area); \
433 EXCEPTION_PROLOG_COMMON_3(trap)
434
435/*
436 * When the idle code in power4_idle puts the CPU into NAP mode,
437 * it has to do so in a loop, and relies on the external interrupt
438 * and decrementer interrupt entry code to get it out of the loop.
439 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
440 * to signal that it is in the loop and needs help to get out.
441 */
442#ifdef CONFIG_PPC_970_NAP
443#define FINISH_NAP \
444BEGIN_FTR_SECTION \
445 ld r11, PACA_THREAD_INFO(r13); \
446 ld r9,TI_LOCAL_FLAGS(r11); \
447 andi. r10,r9,_TLF_NAPPING; \
448 bnel power4_fixup_nap; \
449END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
450#else
451#define FINISH_NAP
452#endif
453
a0502434
NP
454/*
455 * Following are the BOOK3S exception handler helper macros.
456 * Handlers come in a number of types, and each type has a number of varieties.
457 *
458 * EXC_REAL_* - real, unrelocated exception vectors
459 * EXC_VIRT_* - virt (AIL), unrelocated exception vectors
460 * TRAMP_REAL_* - real, unrelocated helpers (virt can call these)
461 * TRAMP_VIRT_* - virt, unreloc helpers (in practice, real can use)
462 * TRAMP_KVM - KVM handlers that get put into real, unrelocated
463 * EXC_COMMON - virt, relocated common handlers
464 *
465 * The EXC handlers are given a name, and branch to name_common, or the
466 * appropriate KVM or masking function. Vector handler varieties are as
467 * follows:
468 *
469 * EXC_{REAL|VIRT}_BEGIN/END - used to open-code the exception
470 *
471 * EXC_{REAL|VIRT} - standard exception
472 *
473 * EXC_{REAL|VIRT}_suffix
474 * where _suffix is:
475 * - _MASKABLE - maskable exception
476 * - _OOL - out of line with trampoline to common handler
477 * - _HV - HV exception
478 *
479 * There can be combinations, e.g., EXC_VIRT_OOL_MASKABLE_HV
480 *
481 * The one unusual case is __EXC_REAL_OOL_HV_DIRECT, which is
482 * an OOL vector that branches to a specified handler rather than the usual
483 * trampoline that goes to common. It, and other underscore macros, should
484 * be used with care.
485 *
486 * KVM handlers come in the following varieties:
487 * TRAMP_KVM
488 * TRAMP_KVM_SKIP
489 * TRAMP_KVM_HV
490 * TRAMP_KVM_HV_SKIP
491 *
492 * COMMON handlers come in the following varieties:
493 * EXC_COMMON_BEGIN/END - used to open-code the handler
494 * EXC_COMMON
495 * EXC_COMMON_ASYNC
496 *
497 * TRAMP_REAL and TRAMP_VIRT can be used with BEGIN/END. KVM
498 * and OOL handlers are implemented as types of TRAMP and TRAMP_VIRT handlers.
499 */
500
/*
 * In-line vector bodies.  _REAL variants enter through rfid with
 * relocation off; _VIRT variants stay relocated and fall through to
 * the common handler.  _HV variants use HSRR0/HSRR1 (EXC_HV);
 * MASKABLE variants pass a soft-mask bitmask to EXCEPTION_PROLOG_1.
 */
501#define __EXC_REAL(name, start, size, area) \
502 EXC_REAL_BEGIN(name, start, size); \
503 SET_SCRATCH0(r13); /* save r13 */ \
504 EXCEPTION_PROLOG_0 area ; \
505 EXCEPTION_PROLOG_1 EXC_STD, area, 1, start, 0 ; \
506 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
507 EXC_REAL_END(name, start, size)
508
509#define EXC_REAL(name, start, size) \
510 __EXC_REAL(name, start, size, PACA_EXGEN)
511
512#define __EXC_VIRT(name, start, size, realvec, area) \
513 EXC_VIRT_BEGIN(name, start, size); \
514 SET_SCRATCH0(r13); /* save r13 */ \
515 EXCEPTION_PROLOG_0 area ; \
516 EXCEPTION_PROLOG_1 EXC_STD, area, 0, realvec, 0; \
517 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
518 EXC_VIRT_END(name, start, size)
519
520#define EXC_VIRT(name, start, size, realvec) \
521 __EXC_VIRT(name, start, size, realvec, PACA_EXGEN)
522
523#define EXC_REAL_MASKABLE(name, start, size, bitmask) \
524 EXC_REAL_BEGIN(name, start, size); \
525 SET_SCRATCH0(r13); /* save r13 */ \
526 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
527 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, start, bitmask ; \
528 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1 ; \
529 EXC_REAL_END(name, start, size)
530
531#define EXC_VIRT_MASKABLE(name, start, size, realvec, bitmask) \
532 EXC_VIRT_BEGIN(name, start, size); \
533 SET_SCRATCH0(r13); /* save r13 */ \
534 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
535 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, bitmask ; \
536 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD ; \
537 EXC_VIRT_END(name, start, size)
538
539#define EXC_REAL_HV(name, start, size) \
540 EXC_REAL_BEGIN(name, start, size); \
541 SET_SCRATCH0(r13); /* save r13 */ \
542 EXCEPTION_PROLOG_0 PACA_EXGEN; \
543 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, start, 0 ; \
544 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1 ; \
545 EXC_REAL_END(name, start, size)
546
547#define EXC_VIRT_HV(name, start, size, realvec) \
548 EXC_VIRT_BEGIN(name, start, size); \
549 SET_SCRATCH0(r13); /* save r13 */ \
550 EXCEPTION_PROLOG_0 PACA_EXGEN; \
551 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0 ; \
552 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV ; \
553 EXC_VIRT_END(name, start, size)
554
/*
 * Out-of-line real-mode vectors: the fixed-location vector only saves
 * minimal state and branches to a tramp_real_##name trampoline, which
 * finishes the prolog and enters name##_common.  The EXC_REAL_OOL*
 * wrappers emit both halves, passing the vector address (start) as vec.
 */
555#define __EXC_REAL_OOL(name, start, size) \
556 EXC_REAL_BEGIN(name, start, size); \
557 SET_SCRATCH0(r13); \
558 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
559 b tramp_real_##name ; \
560 EXC_REAL_END(name, start, size)
561
562#define __TRAMP_REAL_OOL(name, vec) \
563 TRAMP_REAL_BEGIN(tramp_real_##name); \
564 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, 0 ; \
565 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
566
567#define EXC_REAL_OOL(name, start, size) \
568 __EXC_REAL_OOL(name, start, size); \
569 __TRAMP_REAL_OOL(name, start)
570
571#define __EXC_REAL_OOL_MASKABLE(name, start, size) \
572 __EXC_REAL_OOL(name, start, size)
573
574#define __TRAMP_REAL_OOL_MASKABLE(name, vec, bitmask) \
575 TRAMP_REAL_BEGIN(tramp_real_##name); \
576 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, vec, bitmask ; \
577 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
578
579#define EXC_REAL_OOL_MASKABLE(name, start, size, bitmask) \
580 __EXC_REAL_OOL_MASKABLE(name, start, size); \
581 __TRAMP_REAL_OOL_MASKABLE(name, start, bitmask)
582
/* Branches straight to a caller-supplied handler instead of the usual trampoline. */
583#define __EXC_REAL_OOL_HV_DIRECT(name, start, size, handler) \
584 EXC_REAL_BEGIN(name, start, size); \
585 SET_SCRATCH0(r13); \
586 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
587 b handler; \
588 EXC_REAL_END(name, start, size)
589
590#define __EXC_REAL_OOL_HV(name, start, size) \
591 __EXC_REAL_OOL(name, start, size)
592
593#define __TRAMP_REAL_OOL_HV(name, vec) \
594 TRAMP_REAL_BEGIN(tramp_real_##name); \
595 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, 0 ; \
596 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
597
598#define EXC_REAL_OOL_HV(name, start, size) \
599 __EXC_REAL_OOL_HV(name, start, size); \
600 __TRAMP_REAL_OOL_HV(name, start)
601
602#define __EXC_REAL_OOL_MASKABLE_HV(name, start, size) \
603 __EXC_REAL_OOL(name, start, size)
604
605#define __TRAMP_REAL_OOL_MASKABLE_HV(name, vec, bitmask) \
606 TRAMP_REAL_BEGIN(tramp_real_##name); \
607 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, vec, bitmask ; \
608 EXCEPTION_PROLOG_2_REAL name##_common, EXC_HV, 1
609
610#define EXC_REAL_OOL_MASKABLE_HV(name, start, size, bitmask) \
611 __EXC_REAL_OOL_MASKABLE_HV(name, start, size); \
612 __TRAMP_REAL_OOL_MASKABLE_HV(name, start, bitmask)
613
/* Virt-mode out-of-line vector stub: save minimal state, branch to the trampoline. */
614#define __EXC_VIRT_OOL(name, start, size) \
615 EXC_VIRT_BEGIN(name, start, size); \
616 SET_SCRATCH0(r13); \
617 EXCEPTION_PROLOG_0 PACA_EXGEN ; \
618 b tramp_virt_##name; \
619 EXC_VIRT_END(name, start, size)
/*
 * Virt-mode OOL trampoline: finish the prolog and fall through to the
 * common handler.  kvm=0 and bitmask=0, so no KVM test or soft-mask
 * check is generated.
 *
 * Fix: pass the macro parameter 'realvec' rather than the literal
 * token 'vec' — the old form only assembled because EXCEPTION_PROLOG_1
 * never expands \vec when kvm=0 && bitmask=0, and would break the
 * moment either is enabled.
 */
621#define __TRAMP_VIRT_OOL(name, realvec) \
622 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
623 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, 0 ; \
624 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_STD
625
/* Out-of-line virt-mode vector wrappers (stub + trampoline pairs). */
626#define EXC_VIRT_OOL(name, start, size, realvec) \
627 __EXC_VIRT_OOL(name, start, size); \
628 __TRAMP_VIRT_OOL(name, realvec)
629
630#define __EXC_VIRT_OOL_MASKABLE(name, start, size) \
631 __EXC_VIRT_OOL(name, start, size)
632
/*
 * NOTE(review): this virt trampoline finishes with
 * EXCEPTION_PROLOG_2_REAL (rfid path) unlike its siblings which use
 * _2_VIRT — looks intentional but worth confirming.
 */
633#define __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask) \
634 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
635 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, realvec, bitmask ; \
636 EXCEPTION_PROLOG_2_REAL name##_common, EXC_STD, 1
637
638#define EXC_VIRT_OOL_MASKABLE(name, start, size, realvec, bitmask) \
639 __EXC_VIRT_OOL_MASKABLE(name, start, size); \
640 __TRAMP_VIRT_OOL_MASKABLE(name, realvec, bitmask)
641
642#define __EXC_VIRT_OOL_HV(name, start, size) \
643 __EXC_VIRT_OOL(name, start, size)
644
645#define __TRAMP_VIRT_OOL_HV(name, realvec) \
646 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
647 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, 0 ; \
648 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
649
650#define EXC_VIRT_OOL_HV(name, start, size, realvec) \
651 __EXC_VIRT_OOL_HV(name, start, size); \
652 __TRAMP_VIRT_OOL_HV(name, realvec)
653
654#define __EXC_VIRT_OOL_MASKABLE_HV(name, start, size) \
655 __EXC_VIRT_OOL(name, start, size)
656
657#define __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask) \
658 TRAMP_VIRT_BEGIN(tramp_virt_##name); \
659 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, realvec, bitmask ; \
660 EXCEPTION_PROLOG_2_VIRT name##_common, EXC_HV
661
662#define EXC_VIRT_OOL_MASKABLE_HV(name, start, size, realvec, bitmask) \
663 __EXC_VIRT_OOL_MASKABLE_HV(name, start, size); \
664 __TRAMP_VIRT_OOL_MASKABLE_HV(name, realvec, bitmask)
665
/*
 * KVM trampolines: emit the do_kvm_[H]##n label that KVMTEST branches
 * to, expanding KVM_HANDLER with the matching area/hsrr/skip options.
 */
666#define TRAMP_KVM(area, n) \
667 TRAMP_KVM_BEGIN(do_kvm_##n); \
668 KVM_HANDLER area, EXC_STD, n, 0
669
670#define TRAMP_KVM_SKIP(area, n) \
671 TRAMP_KVM_BEGIN(do_kvm_##n); \
672 KVM_HANDLER area, EXC_STD, n, 1
673
674#define TRAMP_KVM_HV(area, n) \
675 TRAMP_KVM_BEGIN(do_kvm_H##n); \
676 KVM_HANDLER area, EXC_HV, n, 0
677
678#define TRAMP_KVM_HV_SKIP(area, n) \
679 TRAMP_KVM_BEGIN(do_kvm_H##n); \
680 KVM_HANDLER area, EXC_HV, n, 1
681
/* Standard relocated common handler: build the frame, then call hdlr(regs). */
682#define EXC_COMMON(name, realvec, hdlr) \
683 EXC_COMMON_BEGIN(name); \
684 EXCEPTION_COMMON(PACA_EXGEN, realvec); \
685 bl save_nvgprs; \
686 RECONCILE_IRQ_STATE(r10, r11); \
687 addi r3,r1,STACK_FRAME_OVERHEAD; \
688 bl hdlr; \
689 b ret_from_except
690
691/*
692 * Like EXC_COMMON, but for exceptions that can occur in the idle task and
693 * therefore need the special idle handling (finish nap and runlatch)
694 */
695#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
696 EXC_COMMON_BEGIN(name); \
697 EXCEPTION_COMMON(PACA_EXGEN, realvec); \
698 FINISH_NAP; \
699 RECONCILE_IRQ_STATE(r10, r11); \
700 RUNLATCH_ON; \
701 addi r3,r1,STACK_FRAME_OVERHEAD; \
702 bl hdlr; \
703 b ret_from_except_lite
704
12a04809 705
0ebc4cda 706/*
57f26649
NP
707 * There are a few constraints to be concerned with.
708 * - Real mode exceptions code/data must be located at their physical location.
709 * - Virtual mode exceptions must be mapped at their 0xc000... location.
710 * - Fixed location code must not call directly beyond the __end_interrupts
711 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
712 * must be used.
713 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
714 * virtual 0xc00...
715 * - Conditional branch targets must be within +/-32K of caller.
716 *
717 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
718 * therefore don't have to run in physically located code or rfid to
719 * virtual mode kernel code. However on relocatable kernels they do have
720 * to branch to KERNELBASE offset because the rest of the kernel (outside
721 * the exception vectors) may be located elsewhere.
722 *
723 * Virtual exceptions correspond with physical, except their entry points
724 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
725 * offset applied. Virtual exceptions are enabled with the Alternate
726 * Interrupt Location (AIL) bit set in the LPCR. However this does not
727 * guarantee they will be delivered virtually. Some conditions (see the ISA)
728 * cause exceptions to be delivered in real mode.
729 *
730 * It's impossible to receive interrupts below 0x300 via AIL.
731 *
732 * KVM: None of the virtual exceptions are from the guest. Anything that
733 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
734 *
735 *
0ebc4cda
BH
736 * We layout physical memory as follows:
737 * 0x0000 - 0x00ff : Secondary processor spin code
57f26649
NP
738 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
739 * 0x1900 - 0x3fff : Real mode trampolines
740 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
741 * 0x5900 - 0x6fff : Relon mode trampolines
0ebc4cda 742 * 0x7000 - 0x7fff : FWNMI data area
57f26649
NP
743 * 0x8000 - .... : Common interrupt handlers, remaining early
744 * setup code, rest of kernel.
e0319829
NP
745 *
746 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
747 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
748 * vectors there.
57f26649
NP
749 */
750OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
751OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
752OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
753OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
ccd47702
NP
754
755#ifdef CONFIG_PPC_POWERNV
bd3524fe
NP
756 .globl start_real_trampolines
757 .globl end_real_trampolines
758 .globl start_virt_trampolines
759 .globl end_virt_trampolines
ccd47702
NP
760#endif
761
57f26649
NP
762#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
763/*
764 * Data area reserved for FWNMI option.
765 * This address (0x7000) is fixed by the RPA.
766 * pseries and powernv need to keep the whole page from
767 * 0x7000 to 0x8000 free for use by the firmware
0ebc4cda 768 */
57f26649
NP
769ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000)
770OPEN_TEXT_SECTION(0x8000)
771#else
772OPEN_TEXT_SECTION(0x7000)
773#endif
774
775USE_FIXED_SECTION(real_vectors)
776
0ebc4cda
BH
777/*
778 * This is the start of the interrupt handlers for pSeries
779 * This code runs with relocation off.
780 * Code from here to __end_interrupts gets copied down to real
781 * address 0x100 when we are running a relocatable kernel.
782 * Therefore any relative branches in this section must only
783 * branch to labels in this section.
784 */
0ebc4cda
BH
785 .globl __start_interrupts
786__start_interrupts:
787
e0319829 788/* No virt vectors corresponding with 0x0..0x100 */
1a6822d1 789EXC_VIRT_NONE(0x4000, 0x100)
e0319829 790
fb479e44 791
a7c1ca19
NP
/*
 * 0x100 System Reset (NMI).  Uses its own save area (PACA_EXNMI) and
 * enters system_reset_common with MSR_RI left clear (set_ri=0).  On
 * P7+ it may instead be a wakeup from a power-saving state, detected
 * from SRR1 bits 46:47 and routed to system_reset_idle_common.
 */
792EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
793 SET_SCRATCH0(r13)
5dba1d50 794 EXCEPTION_PROLOG_0 PACA_EXNMI
a7c1ca19
NP
795
796 /* This is EXCEPTION_PROLOG_1 with the idle feature section added */
797 OPT_SAVE_REG_TO_PACA(PACA_EXNMI+EX_PPR, r9, CPU_FTR_HAS_PPR)
798 OPT_SAVE_REG_TO_PACA(PACA_EXNMI+EX_CFAR, r10, CPU_FTR_CFAR)
799 INTERRUPT_TO_KERNEL
800 SAVE_CTR(r10, PACA_EXNMI)
801 mfcr r9
802
948cf67c 803#ifdef CONFIG_PPC_P7_NAP
fb479e44
NP
804 /*
805 * If running native on arch 2.06 or later, check if we are waking up
ba6d334a
BH
806 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
807 * bits 46:47. A non-0 value indicates that we are coming from a power
808 * saving state. The idle wakeup handler initially runs in real mode,
809 * but we branch to the 0xc000... address so we can turn on relocation
810 * with mtmsr.
948cf67c 811 */
a7c1ca19
NP
812 BEGIN_FTR_SECTION
813 mfspr r10,SPRN_SRR1
814 rlwinm. r10,r10,47-31,30,31
815 beq- 1f
816 cmpwi cr1,r10,2
817 mfspr r3,SPRN_SRR1
818 bltlr cr1 /* no state loss, return to idle caller */
819 BRANCH_TO_C000(r10, system_reset_idle_common)
8201:
821 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
fb479e44 822#endif
371fefd6 823
a7c1ca19
NP
824 KVMTEST EXC_STD 0x100
825 std r11,PACA_EXNMI+EX_R11(r13)
826 std r12,PACA_EXNMI+EX_R12(r13)
827 GET_SCRATCH0(r10)
828 std r10,PACA_EXNMI+EX_R13(r13)
829
830 EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
c4f3b52c
NP
831 /*
832 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
833 * being used, so a nested NMI exception would corrupt it.
834 */
fb479e44 835
1a6822d1
NP
836EXC_REAL_END(system_reset, 0x100, 0x100)
837EXC_VIRT_NONE(0x4100, 0x100)
6de6638b 838TRAMP_KVM(PACA_EXNMI, 0x100)
fb479e44
NP
839
/* Idle-wakeup continuation, branched to from the system reset vector at 0x100. */
840#ifdef CONFIG_PPC_P7_NAP
841EXC_COMMON_BEGIN(system_reset_idle_common)
10d91611
NP
842 /*
843 * This must be a direct branch (without linker branch stub) because
844 * we can not use TOC at this point as r2 may not be restored yet.
845 */
846 b idle_return_gpr_loss
371fefd6
PM
847#endif
848
/*
 * Common system reset (NMI) handler: runs on the dedicated NMI
 * emergency stack with paca->in_nmi accounting, calls
 * system_reset_exception(), then hand-restores all state and returns.
 */
a3d96f70 849EXC_COMMON_BEGIN(system_reset_common)
c4f3b52c
NP
850 /*
851 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
852 * to recover, but nested NMI will notice in_nmi and not recover
853 * because of the use of the NMI stack. in_nmi reentrancy is tested in
854 * system_reset_exception.
855 */
856 lhz r10,PACA_IN_NMI(r13)
857 addi r10,r10,1
858 sth r10,PACA_IN_NMI(r13)
859 li r10,MSR_RI
860 mtmsrd r10,1
aca79d2b 861
b1ee8a3d
NP
862 mr r10,r1
863 ld r1,PACA_NMI_EMERG_SP(r13)
864 subi r1,r1,INT_FRAME_SIZE
47169fba
NP
865 EXCEPTION_COMMON_STACK(PACA_EXNMI, 0x100)
866 bl save_nvgprs
867 /*
868 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
869 * the right thing. We do not want to reconcile because that goes
870 * through irq tracing which we don't want in NMI.
871 *
872 * Save PACAIRQHAPPENED because some code will do a hard disable
873 * (e.g., xmon). So we want to restore this back to where it was
874 * when we return. DAR is unused in the stack, so save it there.
875 */
876 li r10,IRQS_ALL_DISABLED
877 stb r10,PACAIRQSOFTMASK(r13)
878 lbz r10,PACAIRQHAPPENED(r13)
879 std r10,_DAR(r1)
880
c06075f3
NP
881 addi r3,r1,STACK_FRAME_OVERHEAD
882 bl system_reset_exception
15b4dd79
NP
883
884 /* This (and MCE) can be simplified with mtmsrd L=1 */
885 /* Clear MSR_RI before setting SRR0 and SRR1. */
886 li r0,MSR_RI
887 mfmsr r9
888 andc r9,r9,r0
889 mtmsrd r9,1
c4f3b52c
NP
890
891 /*
15b4dd79 892 * MSR_RI is clear, now we can decrement paca->in_nmi.
c4f3b52c
NP
893 */
894 lhz r10,PACA_IN_NMI(r13)
895 subi r10,r10,1
896 sth r10,PACA_IN_NMI(r13)
897
15b4dd79
NP
898 /*
899 * Restore soft mask settings.
900 */
901 ld r10,_DAR(r1)
902 stb r10,PACAIRQHAPPENED(r13)
903 ld r10,SOFTE(r1)
904 stb r10,PACAIRQSOFTMASK(r13)
905
906 /*
907 * Keep below code in synch with MACHINE_CHECK_HANDLER_WINDUP.
908 * Should share common bits...
909 */
910
911 /* Move original SRR0 and SRR1 into the respective regs */
912 ld r9,_MSR(r1)
913 mtspr SPRN_SRR1,r9
914 ld r3,_NIP(r1)
915 mtspr SPRN_SRR0,r3
916 ld r9,_CTR(r1)
917 mtctr r9
918 ld r9,_XER(r1)
919 mtxer r9
920 ld r9,_LINK(r1)
921 mtlr r9
922 REST_GPR(0, r1)
923 REST_8GPRS(2, r1)
924 REST_GPR(10, r1)
925 ld r11,_CCR(r1)
926 mtcr r11
927 REST_GPR(11, r1)
928 REST_2GPRS(12, r1)
929 /* restore original r1. */
930 ld r1,GPR1(r1)
931 RFI_TO_USER_OR_KERNEL
582baf44
NP
932
/* Firmware NMI (FWNMI) entry for pSeries; shares system_reset_common. */
933#ifdef CONFIG_PPC_PSERIES
934/*
935 * Vectors for the FWNMI option. Share common code.
936 */
937TRAMP_REAL_BEGIN(system_reset_fwnmi)
938 SET_SCRATCH0(r13) /* save r13 */
fc557537
NP
939 /* See comment at system_reset exception, don't turn on RI */
940 EXCEPTION_PROLOG_0 PACA_EXNMI
941 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXNMI, 0, 0x100, 0
942 EXCEPTION_PROLOG_2_REAL system_reset_common, EXC_STD, 0
943
582baf44
NP
944#endif /* CONFIG_PPC_PSERIES */
945
0ebc4cda 946
1a6822d1 947EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
b01c8b54
PM
948 /* This is moved out of line as it can be patched by FW, but
949 * some code path might still want to branch into the original
950 * vector
951 */
1707dd16 952 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 953 EXCEPTION_PROLOG_0 PACA_EXMC
1e9b4507 954BEGIN_FTR_SECTION
db7d31ac 955 b machine_check_common_early
1e9b4507 956FTR_SECTION_ELSE
1707dd16 957 b machine_check_pSeries_0
1e9b4507 958ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1a6822d1
NP
959EXC_REAL_END(machine_check, 0x200, 0x100)
960EXC_VIRT_NONE(0x4200, 0x100)
/*
 * Early machine-check entry (real mode): switch to the MC emergency stack,
 * track nesting via paca->in_mce (bounded by MAX_MCE_DEPTH), save SRR0/1,
 * DAR, DSISR into the frame, then rfid to machine_check_handle_early with
 * RI set (and ME set on HV CPUs). On nesting overflow, leave ME off and
 * divert to unrecover_mce so a further MCE checkstops rather than loops.
 */
db7d31ac 961TRAMP_REAL_BEGIN(machine_check_common_early)
fa4cf6b7 962 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 0, 0x200, 0
afcf0095
NP
 963 /*
 964 * Register contents:
 965 * R13 = PACA
 966 * R9 = CR
 967 * Original R9 to R13 is saved on PACA_EXMC
 968 *
 969 * Switch to mc_emergency stack and handle re-entrancy (we limit
 970 * the nested MCE upto level 4 to avoid stack overflow).
 971 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
 972 *
 973 * We use paca->in_mce to check whether this is the first entry or
 974 * nested machine check. We increment paca->in_mce to track nested
 975 * machine checks.
 976 *
 977 * If this is the first entry then set stack pointer to
 978 * paca->mc_emergency_sp, otherwise r1 is already pointing to
 979 * stack frame on mc_emergency stack.
 980 *
 981 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
 982 * checkstop if we get another machine check exception before we do
 983 * rfid with MSR_ME=1.
1945bc45
NP
 984 *
 985 * This interrupt can wake directly from idle. If that is the case,
 986 * the machine check is handled then the idle wakeup code is called
2bf1071a 987 * to restore state.
afcf0095
NP
 988 */
 989 mr r11,r1 /* Save r1 */
 990 lhz r10,PACA_IN_MCE(r13)
 991 cmpwi r10,0 /* Are we in nested machine check */
 992 bne 0f /* Yes, we are. */
 993 /* First machine check entry */
 994 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
 9950: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
 996 addi r10,r10,1 /* increment paca->in_mce */
 997 sth r10,PACA_IN_MCE(r13)
 998 /* Limit nested MCE to level 4 to avoid stack overflow */
ba41e1e1 999 cmpwi r10,MAX_MCE_DEPTH
afcf0095
NP
 1000 bgt 2f /* Check if we hit limit of 4 */
 1001 std r11,GPR1(r1) /* Save r1 on the stack. */
 1002 std r11,0(r1) /* make stack chain pointer */
 1003 mfspr r11,SPRN_SRR0 /* Save SRR0 */
 1004 std r11,_NIP(r1)
 1005 mfspr r11,SPRN_SRR1 /* Save SRR1 */
 1006 std r11,_MSR(r1)
 1007 mfspr r11,SPRN_DAR /* Save DAR */
 1008 std r11,_DAR(r1)
 1009 mfspr r11,SPRN_DSISR /* Save DSISR */
 1010 std r11,_DSISR(r1)
 1011 std r9,_CCR(r1) /* Save CR in stackframe */
e13e7cd4 1012 /* We don't touch AMR here, we never go to virtual mode */
afcf0095
NP
 1013 /* Save r9 through r13 from EXMC save area to stack frame. */
 1014 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
 1015 mfmsr r11 /* get MSR value */
db7d31ac 1016BEGIN_FTR_SECTION
afcf0095 1017 ori r11,r11,MSR_ME /* turn on ME bit */
db7d31ac 1018END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
afcf0095
NP
 1019 ori r11,r11,MSR_RI /* turn on RI bit */
 1020 LOAD_HANDLER(r12, machine_check_handle_early)
 10211: mtspr SPRN_SRR0,r12
 1022 mtspr SPRN_SRR1,r11
222f20f1 1023 RFI_TO_KERNEL
afcf0095
NP
 1024 b . /* prevent speculative execution */
 10252:
 1026 /* Stack overflow. Stay on emergency stack and panic.
 1027 * Keep the ME bit off while panic-ing, so that if we hit
 1028 * another machine check we checkstop.
 1029 */
 1030 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
 1031 ld r11,PACAKMSR(r13)
 1032 LOAD_HANDLER(r12, unrecover_mce)
 1033 li r10,MSR_ME
 1034 andc r11,r11,r10 /* Turn off MSR_ME */
 1035 b 1b
 1036 b . /* prevent speculative execution */
afcf0095
NP
1037
/*
 * pSeries / FWNMI machine-check entry. Non-HV (guest) CPUs still divert
 * to machine_check_common_early; machine_check_pSeries_0 goes straight
 * to machine_check_common with RI left off (see comment below).
 */
1038TRAMP_REAL_BEGIN(machine_check_pSeries)
 1039 .globl machine_check_fwnmi
 1040machine_check_fwnmi:
 1041 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1042 EXCEPTION_PROLOG_0 PACA_EXMC
a43c1590 1043BEGIN_FTR_SECTION
db7d31ac 1044 b machine_check_common_early
a43c1590 1045END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
afcf0095 1046machine_check_pSeries_0:
fa4cf6b7 1047 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXMC, 1, 0x200, 0
afcf0095 1048 /*
83a980f7
NP
 1049 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
 1050 * nested machine check corrupts it. machine_check_common enables
 1051 * MSR_RI.
afcf0095 1052 */
2d046308 1053 EXCEPTION_PROLOG_2_REAL machine_check_common, EXC_STD, 0
afcf0095
NP
1054
1055TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
 1056
/*
 * Virtual-mode machine-check handler: captures DAR/DSISR from the
 * PACA_EXMC save area into the frame, re-enables RI once EXMC is no
 * longer needed, then calls machine_check_exception().
 */
 1057EXC_COMMON_BEGIN(machine_check_common)
 1058 /*
 1059 * Machine check is different because we use a different
 1060 * save area: PACA_EXMC instead of PACA_EXGEN.
 1061 */
 1062 mfspr r10,SPRN_DAR
 1063 std r10,PACA_EXMC+EX_DAR(r13)
 1064 mfspr r10,SPRN_DSISR
 1065 stw r10,PACA_EXMC+EX_DSISR(r13)
 1066 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
 1067 FINISH_NAP
 1068 RECONCILE_IRQ_STATE(r10, r11)
 1069 ld r3,PACA_EXMC+EX_DAR(r13)
 1070 lwz r4,PACA_EXMC+EX_DSISR(r13)
 1071 /* Enable MSR_RI when finished with PACA_EXMC */
 1072 li r10,MSR_RI
 1073 mtmsrd r10,1
 1074 std r3,_DAR(r1)
 1075 std r4,_DSISR(r1)
 1076 bl save_nvgprs
 1077 addi r3,r1,STACK_FRAME_OVERHEAD
 1078 bl machine_check_exception
 1079 b ret_from_except
1080
/*
 * Unwind the machine-check frame back into SRR0/SRR1 and the saved GPRs
 * so the caller can rfid out. Clears RI first (SRR0/SRR1 are live after
 * that point), and decrements paca->in_mce while the stack is still
 * addressable. Comments cannot be placed inside the continuation lines
 * below without breaking the macro. Kept in sync with the system reset
 * windup (see note there: "Keep below code in synch with
 * MACHINE_CHECK_HANDLER_WINDUP").
 */
 1081#define MACHINE_CHECK_HANDLER_WINDUP \
 1082 /* Clear MSR_RI before setting SRR0 and SRR1. */\
 1083 li r0,MSR_RI; \
 1084 mfmsr r9; /* get MSR value */ \
 1085 andc r9,r9,r0; \
 1086 mtmsrd r9,1; /* Clear MSR_RI */ \
 1087 /* Move original SRR0 and SRR1 into the respective regs */ \
 1088 ld r9,_MSR(r1); \
 1089 mtspr SPRN_SRR1,r9; \
 1090 ld r3,_NIP(r1); \
 1091 mtspr SPRN_SRR0,r3; \
 1092 ld r9,_CTR(r1); \
 1093 mtctr r9; \
 1094 ld r9,_XER(r1); \
 1095 mtxer r9; \
 1096 ld r9,_LINK(r1); \
 1097 mtlr r9; \
 1098 REST_GPR(0, r1); \
 1099 REST_8GPRS(2, r1); \
 1100 REST_GPR(10, r1); \
 1101 ld r11,_CCR(r1); \
 1102 mtcr r11; \
 1103 /* Decrement paca->in_mce. */ \
 1104 lhz r12,PACA_IN_MCE(r13); \
 1105 subi r12,r12,1; \
 1106 sth r12,PACA_IN_MCE(r13); \
 1107 REST_GPR(11, r1); \
 1108 REST_2GPRS(12, r1); \
 1109 /* restore original r1. */ \
 1110 ld r1,GPR1(r1)
1111
1945bc45
NP
 1112#ifdef CONFIG_PPC_P7_NAP
 1113/*
 1114 * This is an idle wakeup. Low level machine check has already been
 1115 * done. Queue the event then call the idle code to do the wake up.
 1116 */
 1117EXC_COMMON_BEGIN(machine_check_idle_common)
 1118 bl machine_check_queue_event
 1119
 1120 /*
 1121 * We have not used any non-volatile GPRs here, and as a rule
 1122 * most exception code including machine check does not.
 1123 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
 1124 * wakeup will restore volatile registers.
 1125 *
 1126 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
 1127 *
 1128 * Then decrement MCE nesting after finishing with the stack.
 1129 */
 1130 ld r3,_MSR(r1)
10d91611 1131 ld r4,_LINK(r1)
1945bc45
NP
 1132
 1133 lhz r11,PACA_IN_MCE(r13)
 1134 subi r11,r11,1
 1135 sth r11,PACA_IN_MCE(r13)
 1136
10d91611
NP
 1137 mtlr r4
 /* Extract SRR1 wakeup-state field (bits in the SRR1[46:47] region after
 * the rotate); < 2 means no state loss, so return straight to the idle
 * caller, otherwise go through idle_return_gpr_loss. */
 1138 rlwinm r10,r3,47-31,30,31
 1139 cmpwi cr1,r10,2
 1140 bltlr cr1 /* no state loss, return to idle caller */
 1141 b idle_return_gpr_loss
1945bc45 1142#endif
afcf0095
NP
 1143 /*
 1144 * Handle machine check early in real mode. We come here with
 1145 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
 1146 */
 1147EXC_COMMON_BEGIN(machine_check_handle_early)
 1148 std r0,GPR0(r1) /* Save r0 */
 1149 EXCEPTION_PROLOG_COMMON_3(0x200)
 1150 bl save_nvgprs
 1151 addi r3,r1,STACK_FRAME_OVERHEAD
 1152 bl machine_check_early
 1153 std r3,RESULT(r1) /* Save result */
 1154 ld r12,_MSR(r1)
db7d31ac
MS
 /* Non-HV (guest/pSeries) CPUs skip the idle and HV-mode checks. */
 1155BEGIN_FTR_SECTION
 1156 b 4f
 1157END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
1945bc45 1158
afcf0095
NP
 1159#ifdef CONFIG_PPC_P7_NAP
 1160 /*
 1161 * Check if thread was in power saving mode. We come here when any
 1162 * of the following is true:
 1163 * a. thread wasn't in power saving mode
 1164 * b. thread was in power saving mode with no state loss,
 1165 * supervisor state loss or hypervisor state loss.
 1166 *
 1167 * Go back to nap/sleep/winkle mode again if (b) is true.
 1168 */
1945bc45
NP
 1169 BEGIN_FTR_SECTION
 1170 rlwinm. r11,r12,47-31,30,31
6102c005 1171 bne machine_check_idle_common
1945bc45 1172 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
afcf0095 1173#endif
1945bc45 1174
afcf0095
NP
 1175 /*
 1176 * Check if we are coming from hypervisor userspace. If yes then we
 1177 * continue in host kernel in V mode to deliver the MC event.
 1178 */
 1179 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
 1180 beq 5f
db7d31ac 11814: andi. r11,r12,MSR_PR /* See if coming from user. */
afcf0095
NP
 1182 bne 9f /* continue in V mode if we are. */
 1183
 11845:
 1185#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
db7d31ac 1186BEGIN_FTR_SECTION
afcf0095
NP
 1187 /*
 1188 * We are coming from kernel context. Check if we are coming from
 1189 * guest. if yes, then we can continue. We will fall through
 1190 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
 1191 */
 1192 lbz r11,HSTATE_IN_GUEST(r13)
 1193 cmpwi r11,0 /* Check if coming from guest */
 1194 bne 9f /* continue if we are. */
db7d31ac 1195END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
afcf0095
NP
 1196#endif
 1197 /*
 1198 * At this point we are not sure about what context we come from.
 1199 * Queue up the MCE event and return from the interrupt.
 1200 * But before that, check if this is an un-recoverable exception.
 1201 * If yes, then stay on emergency stack and panic.
 1202 */
 1203 andi. r11,r12,MSR_RI
 1204 bne 2f
 12051: mfspr r11,SPRN_SRR0
 1206 LOAD_HANDLER(r10,unrecover_mce)
 1207 mtspr SPRN_SRR0,r10
 1208 ld r10,PACAKMSR(r13)
 1209 /*
 1210 * We are going down. But there are chances that we might get hit by
 1211 * another MCE during panic path and we may run into unstable state
 1212 * with no way out. Hence, turn ME bit off while going down, so that
 1213 * when another MCE is hit during panic path, system will checkstop
 1214 * and hypervisor will get restarted cleanly by SP.
 1215 */
 1216 li r3,MSR_ME
 1217 andc r10,r10,r3 /* Turn off MSR_ME */
 1218 mtspr SPRN_SRR1,r10
222f20f1 1219 RFI_TO_KERNEL
afcf0095
NP
 1220 b .
 12212:
 1222 /*
 1223 * Check if we have successfully handled/recovered from error, if not
 1224 * then stay on emergency stack and panic.
 1225 */
 1226 ld r3,RESULT(r1) /* Load result */
 1227 cmpdi r3,0 /* see if we handled MCE successfully */
 1228
 1229 beq 1b /* if !handled then panic */
db7d31ac 1230BEGIN_FTR_SECTION
afcf0095
NP
 1231 /*
 1232 * Return from MC interrupt.
 1233 * Queue up the MCE event so that we can log it later, while
 1234 * returning from kernel or opal call.
 1235 */
 1236 bl machine_check_queue_event
 1237 MACHINE_CHECK_HANDLER_WINDUP
222f20f1 1238 RFI_TO_USER_OR_KERNEL
db7d31ac
MS
 1239FTR_SECTION_ELSE
 1240 /*
 1241 * pSeries: Return from MC interrupt. Before that stay on emergency
 1242 * stack and call machine_check_exception to log the MCE event.
 1243 */
 1244 LOAD_HANDLER(r10,mce_return)
 1245 mtspr SPRN_SRR0,r10
 1246 ld r10,PACAKMSR(r13)
 1247 mtspr SPRN_SRR1,r10
 1248 RFI_TO_KERNEL
 1249 b .
 1250ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
afcf0095
NP
 12519:
 1252 /* Deliver the machine check to host kernel in V mode. */
 1253 MACHINE_CHECK_HANDLER_WINDUP
db7d31ac 1254 SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1255 EXCEPTION_PROLOG_0 PACA_EXMC
db7d31ac 1256 b machine_check_pSeries_0
afcf0095
NP
1257
/*
 * Terminal path for an unrecoverable machine check: log/panic via
 * machine_check_exception, then spin in unrecoverable_exception.
 * Reached with ME off so a further MCE checkstops (see callers).
 */
 1258EXC_COMMON_BEGIN(unrecover_mce)
 1259 /* Invoke machine_check_exception to print MCE event and panic. */
 1260 addi r3,r1,STACK_FRAME_OVERHEAD
 1261 bl machine_check_exception
 1262 /*
 1263 * We will not reach here. Even if we did, there is no way out. Call
 1264 * unrecoverable_exception and die.
 1265 */
 12661: addi r3,r1,STACK_FRAME_OVERHEAD
 1267 bl unrecoverable_exception
 1268 b 1b
1269
a43c1590
MS
/*
 * pSeries recovered-MCE exit: log the event, unwind the MC frame
 * (windup also drops paca->in_mce), and rfid back to the interrupted
 * context.
 */
 1270EXC_COMMON_BEGIN(mce_return)
 1271 /* Invoke machine_check_exception to print MCE event and return. */
 1272 addi r3,r1,STACK_FRAME_OVERHEAD
 1273 bl machine_check_exception
db7d31ac 1274 MACHINE_CHECK_HANDLER_WINDUP
a43c1590
MS
 1275 RFI_TO_KERNEL
 1276 b .
0ebc4cda 1277
e779fc93
NP
/*
 * 0x300 data storage interrupt (page fault on data access). Real and
 * virt entries both stash DAR/DSISR in PACA_EXGEN before RI is set,
 * then dispatch to data_access_common, which tries the hash fault path
 * on HPT or goes straight to handle_page_fault on radix.
 */
 1278EXC_REAL_BEGIN(data_access, 0x300, 0x80)
 1279SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1280EXCEPTION_PROLOG_0 PACA_EXGEN
e779fc93
NP
 1281 b tramp_real_data_access
 1282EXC_REAL_END(data_access, 0x300, 0x80)
 1283
 1284TRAMP_REAL_BEGIN(tramp_real_data_access)
fa4cf6b7 1285EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x300, 0
38555434
NP
 1286 /*
 1287 * DAR/DSISR must be read before setting MSR[RI], because
 1288 * a d-side MCE will clobber those registers so is not
 1289 * recoverable if they are live.
 1290 */
 1291 mfspr r10,SPRN_DAR
 1292 mfspr r11,SPRN_DSISR
 1293 std r10,PACA_EXGEN+EX_DAR(r13)
 1294 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1295EXCEPTION_PROLOG_2_REAL data_access_common, EXC_STD, 1
e779fc93
NP
 1296
 1297EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
 1298SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1299EXCEPTION_PROLOG_0 PACA_EXGEN
fa4cf6b7 1300EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x300, 0
38555434
NP
 1301 mfspr r10,SPRN_DAR
 1302 mfspr r11,SPRN_DSISR
 1303 std r10,PACA_EXGEN+EX_DAR(r13)
 1304 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1305EXCEPTION_PROLOG_2_VIRT data_access_common, EXC_STD
e779fc93
NP
 1306EXC_VIRT_END(data_access, 0x4300, 0x80)
 1307
80795e6c
NP
 1308TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
 1309
 1310EXC_COMMON_BEGIN(data_access_common)
 1311 /*
 1312 * Here r13 points to the paca, r9 contains the saved CR,
 1313 * SRR0 and SRR1 are saved in r11 and r12,
 1314 * r9 - r13 are saved in paca->exgen.
38555434 1315 * EX_DAR and EX_DSISR have saved DAR/DSISR
80795e6c 1316 */
80795e6c
NP
 1317 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
 1318 RECONCILE_IRQ_STATE(r10, r11)
 1319 ld r12,_MSR(r1)
 1320 ld r3,PACA_EXGEN+EX_DAR(r13)
 1321 lwz r4,PACA_EXGEN+EX_DSISR(r13)
 1322 li r5,0x300
 1323 std r3,_DAR(r1)
 1324 std r4,_DSISR(r1)
 1325BEGIN_MMU_FTR_SECTION
 1326 b do_hash_page /* Try to handle as hpte fault */
 1327MMU_FTR_SECTION_ELSE
 1328 b handle_page_fault
 1329ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1330
0ebc4cda 1331
/*
 * 0x380 data SLB miss. Saves DAR only (no DSISR for SLB faults), then:
 * HPT -> do_slb_fault, returning via fast_exception_return on success;
 * radix -> -EFAULT (address outside page table range), and either error
 * case falls into do_bad_slb_fault.
 */
1a6822d1 1332EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
e779fc93 1333SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1334EXCEPTION_PROLOG_0 PACA_EXSLB
e779fc93 1335 b tramp_real_data_access_slb
1a6822d1 1336EXC_REAL_END(data_access_slb, 0x380, 0x80)
0ebc4cda 1337
e779fc93 1338TRAMP_REAL_BEGIN(tramp_real_data_access_slb)
fa4cf6b7 1339EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 1, 0x380, 0
38555434
NP
 1340 mfspr r10,SPRN_DAR
 1341 std r10,PACA_EXSLB+EX_DAR(r13)
2d046308 1342EXCEPTION_PROLOG_2_REAL data_access_slb_common, EXC_STD, 1
e779fc93 1343
1a6822d1 1344EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
e779fc93 1345SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1346EXCEPTION_PROLOG_0 PACA_EXSLB
fa4cf6b7 1347EXCEPTION_PROLOG_1 EXC_STD, PACA_EXSLB, 0, 0x380, 0
38555434
NP
 1348 mfspr r10,SPRN_DAR
 1349 std r10,PACA_EXSLB+EX_DAR(r13)
2d046308 1350EXCEPTION_PROLOG_2_VIRT data_access_slb_common, EXC_STD
1a6822d1 1351EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
48e7b769 1352
2b9af6e4
NP
 1353TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
 1354
48e7b769 1355EXC_COMMON_BEGIN(data_access_slb_common)
48e7b769
NP
 1356 EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
 1357 ld r4,PACA_EXSLB+EX_DAR(r13)
 1358 std r4,_DAR(r1)
 1359 addi r3,r1,STACK_FRAME_OVERHEAD
7100e870
NP
 1360BEGIN_MMU_FTR_SECTION
 1361 /* HPT case, do SLB fault */
48e7b769
NP
 1362 bl do_slb_fault
 1363 cmpdi r3,0
 1364 bne- 1f
 1365 b fast_exception_return
 13661: /* Error case */
7100e870
NP
 1367MMU_FTR_SECTION_ELSE
 1368 /* Radix case, access is outside page table range */
 1369 li r3,-EFAULT
 1370ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
48e7b769
NP
 1371 std r3,RESULT(r1)
 1372 bl save_nvgprs
 1373 RECONCILE_IRQ_STATE(r10, r11)
 1374 ld r4,_DAR(r1)
 1375 ld r5,RESULT(r1)
 1376 addi r3,r1,STACK_FRAME_OVERHEAD
 1377 bl do_bad_slb_fault
 1378 b ret_from_except
1379
2b9af6e4 1380
1a6822d1
NP
/*
 * 0x400 instruction storage interrupt (page fault on fetch). The fault
 * "address" is NIP and the DSISR-equivalent bits come from SRR1
 * (masked with DSISR_SRR1_MATCH_64S); otherwise mirrors data_access.
 */
 1381EXC_REAL(instruction_access, 0x400, 0x80)
 1382EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
27ce77df
NP
 1383TRAMP_KVM(PACA_EXGEN, 0x400)
 1384
 1385EXC_COMMON_BEGIN(instruction_access_common)
 1386 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
 1387 RECONCILE_IRQ_STATE(r10, r11)
 1388 ld r12,_MSR(r1)
 1389 ld r3,_NIP(r1)
475b581f 1390 andis. r4,r12,DSISR_SRR1_MATCH_64S@h
27ce77df
NP
 1391 li r5,0x400
 1392 std r3,_DAR(r1)
 1393 std r4,_DSISR(r1)
 1394BEGIN_MMU_FTR_SECTION
 1395 b do_hash_page /* Try to handle as hpte fault */
 1396MMU_FTR_SECTION_ELSE
 1397 b handle_page_fault
 1398ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1399
0ebc4cda 1400
fc557537
NP
/*
 * 0x480 instruction SLB miss. Same shape as data_access_slb but the
 * faulting address is NIP; HPT goes through do_slb_fault, radix reports
 * -EFAULT, errors land in do_bad_slb_fault.
 */
 1401__EXC_REAL(instruction_access_slb, 0x480, 0x80, PACA_EXSLB)
 1402__EXC_VIRT(instruction_access_slb, 0x4480, 0x80, 0x480, PACA_EXSLB)
48e7b769 1403TRAMP_KVM(PACA_EXSLB, 0x480)
54be0b9c 1404
48e7b769
NP
 1405EXC_COMMON_BEGIN(instruction_access_slb_common)
 1406 EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
 1407 ld r4,_NIP(r1)
 1408 addi r3,r1,STACK_FRAME_OVERHEAD
7100e870
NP
 1409BEGIN_MMU_FTR_SECTION
 1410 /* HPT case, do SLB fault */
48e7b769
NP
 1411 bl do_slb_fault
 1412 cmpdi r3,0
 1413 bne- 1f
 1414 b fast_exception_return
 14151: /* Error case */
7100e870
NP
 1416MMU_FTR_SECTION_ELSE
 1417 /* Radix case, access is outside page table range */
 1418 li r3,-EFAULT
 1419ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
48e7b769 1420 std r3,RESULT(r1)
8d04631a 1421 bl save_nvgprs
8d04631a 1422 RECONCILE_IRQ_STATE(r10, r11)
48e7b769
NP
 1423 ld r4,_NIP(r1)
 1424 ld r5,RESULT(r1)
 1425 addi r3,r1,STACK_FRAME_OVERHEAD
 1426 bl do_bad_slb_fault
8d04631a
NP
 1427 b ret_from_except
1428
48e7b769 1429
/*
 * 0x500 external interrupt. On HV-capable CPUs (with ARCH_206 on the
 * real vector) the interrupt arrives via HSRR0/1 (EXC_HV prolog),
 * otherwise via SRR0/1; both masked (IRQS_DISABLED) and routed to the
 * common do_IRQ handler.
 */
1a6822d1 1430EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
80bd9177
NP
 1431 SET_SCRATCH0(r13) /* save r13 */
 1432 EXCEPTION_PROLOG_0 PACA_EXGEN
a5d4f3ad 1433 BEGIN_FTR_SECTION
fc557537
NP
 1434 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
 1435 EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_HV, 1
de56a948 1436 FTR_SECTION_ELSE
fc557537
NP
 1437 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
 1438 EXCEPTION_PROLOG_2_REAL hardware_interrupt_common, EXC_STD, 1
969391c5 1439 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1a6822d1 1440EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
da2bc464 1441

1a6822d1 1442EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
80bd9177
NP
 1443 SET_SCRATCH0(r13) /* save r13 */
 1444 EXCEPTION_PROLOG_0 PACA_EXGEN
c138e588 1445 BEGIN_FTR_SECTION
fc557537
NP
 1446 EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
 1447 EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_HV
c138e588 1448 FTR_SECTION_ELSE
fc557537
NP
 1449 EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x500, IRQS_DISABLED
 1450 EXCEPTION_PROLOG_2_VIRT hardware_interrupt_common, EXC_STD
c138e588 1451 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
1a6822d1 1452EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
c138e588 1453
7ede5317
NP
 1454TRAMP_KVM(PACA_EXGEN, 0x500)
 1455TRAMP_KVM_HV(PACA_EXGEN, 0x500)
c138e588
NP
 1456EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
1457
1458
e779fc93
NP
/*
 * 0x600 alignment interrupt. DAR/DSISR are captured in the prolog
 * (like data_access, before RI could matter) and handed to
 * alignment_exception().
 */
 1459EXC_REAL_BEGIN(alignment, 0x600, 0x100)
 1460SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1461EXCEPTION_PROLOG_0 PACA_EXGEN
fa4cf6b7 1462EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 1, 0x600, 0
38555434
NP
 1463 mfspr r10,SPRN_DAR
 1464 mfspr r11,SPRN_DSISR
 1465 std r10,PACA_EXGEN+EX_DAR(r13)
 1466 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1467EXCEPTION_PROLOG_2_REAL alignment_common, EXC_STD, 1
e779fc93
NP
 1468EXC_REAL_END(alignment, 0x600, 0x100)
 1469
 1470EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
 1471SET_SCRATCH0(r13) /* save r13 */
5dba1d50 1472EXCEPTION_PROLOG_0 PACA_EXGEN
fa4cf6b7 1473EXCEPTION_PROLOG_1 EXC_STD, PACA_EXGEN, 0, 0x600, 0
38555434
NP
 1474 mfspr r10,SPRN_DAR
 1475 mfspr r11,SPRN_DSISR
 1476 std r10,PACA_EXGEN+EX_DAR(r13)
 1477 stw r11,PACA_EXGEN+EX_DSISR(r13)
2d046308 1478EXCEPTION_PROLOG_2_VIRT alignment_common, EXC_STD
e779fc93
NP
 1479EXC_VIRT_END(alignment, 0x4600, 0x100)
 1480
da2bc464 1481TRAMP_KVM(PACA_EXGEN, 0x600)
f9aa6714 1482EXC_COMMON_BEGIN(alignment_common)
f9aa6714
NP
 1483 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
 1484 ld r3,PACA_EXGEN+EX_DAR(r13)
 1485 lwz r4,PACA_EXGEN+EX_DSISR(r13)
 1486 std r3,_DAR(r1)
 1487 std r4,_DSISR(r1)
 1488 bl save_nvgprs
 1489 RECONCILE_IRQ_STATE(r10, r11)
 1490 addi r3,r1,STACK_FRAME_OVERHEAD
 1491 bl alignment_exception
 1492 b ret_from_except
1493
da2bc464 1494
1a6822d1
NP
/*
 * 0x700 program check. Special case: a TM Bad Thing taken from "kernel"
 * state may arrive with user register values, so that path is detected
 * from SRR1 and diverted onto the emergency stack before the common
 * prolog runs.
 */
 1495EXC_REAL(program_check, 0x700, 0x100)
 1496EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
da2bc464 1497TRAMP_KVM(PACA_EXGEN, 0x700)
11e87346 1498EXC_COMMON_BEGIN(program_check_common)
265e60a1
CB
 1499 /*
 1500 * It's possible to receive a TM Bad Thing type program check with
 1501 * userspace register values (in particular r1), but with SRR1 reporting
 1502 * that we came from the kernel. Normally that would confuse the bad
 1503 * stack logic, and we would report a bad kernel stack pointer. Instead
 1504 * we switch to the emergency stack if we're taking a TM Bad Thing from
 1505 * the kernel.
 1506 */
 1507 li r10,MSR_PR /* Build a mask of MSR_PR .. */
 1508 oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */
 1509 and r10,r10,r12 /* Mask SRR1 with that. */
 1510 srdi r10,r10,8 /* Shift it so we can compare */
 1511 cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */
 1512 bne 1f /* If != go to normal path. */
 1513
 1514 /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */
 1515 andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */
 1516 /* 3 in EXCEPTION_PROLOG_COMMON */
 1517 mr r10,r1 /* Save r1 */
 1518 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
 1519 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
 1520 b 3f /* Jump into the macro !! */
 15211: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
11e87346
NP
 1522 bl save_nvgprs
 1523 RECONCILE_IRQ_STATE(r10, r11)
 1524 addi r3,r1,STACK_FRAME_OVERHEAD
 1525 bl program_check_exception
 1526 b ret_from_except
1527
b01c8b54 1528
1a6822d1
NP
/*
 * 0x800 FP unavailable. From userspace: lazily load the FP state
 * (load_up_fpu) unless the task was in a TM transaction, which needs
 * fp_unavailable_tm. From the kernel it is a bug (BUG_OPCODE).
 */
 1529EXC_REAL(fp_unavailable, 0x800, 0x100)
 1530EXC_VIRT(fp_unavailable, 0x4800, 0x100, 0x800)
da2bc464 1531TRAMP_KVM(PACA_EXGEN, 0x800)
c78d9b97
NP
 1532EXC_COMMON_BEGIN(fp_unavailable_common)
 1533 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
 1534 bne 1f /* if from user, just load it up */
 1535 bl save_nvgprs
 1536 RECONCILE_IRQ_STATE(r10, r11)
 1537 addi r3,r1,STACK_FRAME_OVERHEAD
 1538 bl kernel_fp_unavailable_exception
 1539 BUG_OPCODE
 15401:
 1541#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 1542BEGIN_FTR_SECTION
 1543 /* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
 1544 * transaction), go do TM stuff
 1545 */
 1546 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
 1547 bne- 2f
 1548END_FTR_SECTION_IFSET(CPU_FTR_TM)
 1549#endif
 1550 bl load_up_fpu
 1551 b fast_exception_return
 1552#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 15532: /* User process was in a transaction */
 1554 bl save_nvgprs
 1555 RECONCILE_IRQ_STATE(r10, r11)
 1556 addi r3,r1,STACK_FRAME_OVERHEAD
 1557 bl fp_unavailable_tm
 1558 b ret_from_except
 1559#endif
1560
a5d4f3ad 1561
/*
 * 0x900 decrementer (timer, maskable), 0x980 hypervisor decrementer,
 * 0xa00 doorbell (maskable; unknown_exception when doorbells are not
 * configured), and the reserved 0xb00 trap. All use the generic
 * EXC_REAL/EXC_VIRT vector macros with a single common C handler each.
 */
a048a07d 1562EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
f14e953b 1563EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
39c0da57
NP
 1564TRAMP_KVM(PACA_EXGEN, 0x900)
 1565EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
 1566
a485c709 1567
1a6822d1
NP
 1568EXC_REAL_HV(hdecrementer, 0x980, 0x80)
 1569EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
facc6d74
NP
 1570TRAMP_KVM_HV(PACA_EXGEN, 0x980)
 1571EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
 1572
a5d4f3ad 1573
f14e953b
MS
 1574EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0x100, IRQS_DISABLED)
 1575EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x100, 0xa00, IRQS_DISABLED)
da2bc464 1576TRAMP_KVM(PACA_EXGEN, 0xa00)
ca243163
NP
 1577#ifdef CONFIG_PPC_DOORBELL
 1578EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
 1579#else
 1580EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
 1581#endif
 1582
0ebc4cda 1583
1a6822d1
NP
 1584EXC_REAL(trap_0b, 0xb00, 0x100)
 1585EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
da2bc464 1586TRAMP_KVM(PACA_EXGEN, 0xb00)
341215dc
NP
 1587EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
1588
acd7d8ce
NP
 1589/*
 1590 * system call / hypercall (0xc00, 0x4c00)
 1591 *
 1592 * The system call exception is invoked with "sc 0" and does not alter HV bit.
 1593 * There is support for kernel code to invoke system calls but there are no
 1594 * in-tree users.
 1595 *
 1596 * The hypercall is invoked with "sc 1" and sets HV=1.
 1597 *
 1598 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
 1599 * 0x4c00 virtual mode.
 1600 *
 1601 * Call convention:
 1602 *
 1603 * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
 1604 *
 1605 * For hypercalls, the register convention is as follows:
 1606 * r0 volatile
 1607 * r1-2 nonvolatile
 1608 * r3 volatile parameter and return value for status
 1609 * r4-r10 volatile input and output value
 1610 * r11 volatile hypercall number and output value
76fc0cfc 1611 * r12 volatile input and output value
acd7d8ce
NP
 1612 * r13-r31 nonvolatile
 1613 * LR nonvolatile
 1614 * CTR volatile
 1615 * XER volatile
 1616 * CR0-1 CR5-7 volatile
 1617 * CR2-4 nonvolatile
 1618 * Other registers nonvolatile
 1619 *
 1620 * The intersection of volatile registers that don't contain possible
76fc0cfc
NP
 1621 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 1622 * without saving, though xer is not a good idea to use, as hardware may
 1623 * interpret some bits so it may be costly to change them.
acd7d8ce 1624 */
/*
 * SYSCALL_KVMTEST: with KVM built in, r13 is parked in CTR so the SPRG
 * scratch register is untouched, then the KVM test may divert to
 * do_kvm_0xc00_system_call. Either variant leaves PACA in r13 and the
 * original r13 in r9. Comments cannot be inserted inside the `\`
 * continuations below.
 */
bc355125 1625#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
acd7d8ce
NP
 1626 /*
 1627 * There is a little bit of juggling to get syscall and hcall
76fc0cfc
NP
 1628 * working well. Save r13 in ctr to avoid using SPRG scratch
 1629 * register.
acd7d8ce
NP
 1630 *
 1631 * Userspace syscalls have already saved the PPR, hcalls must save
 1632 * it before setting HMT_MEDIUM.
 1633 */
bc355125 1634#define SYSCALL_KVMTEST \
76fc0cfc 1635 mtctr r13; \
bc355125 1636 GET_PACA(r13); \
76fc0cfc 1637 std r10,PACA_EXGEN+EX_R10(r13); \
a048a07d 1638 INTERRUPT_TO_KERNEL; \
a7c1ca19 1639 KVMTEST EXC_STD 0xc00 ; /* uses r10, branch to do_kvm_0xc00_system_call */ \
bc355125 1640 HMT_MEDIUM; \
76fc0cfc 1641 mfctr r9;
bc355125
PM
 1642
 1643#else
 1644#define SYSCALL_KVMTEST \
acd7d8ce
NP
 1645 HMT_MEDIUM; \
 1646 mr r9,r13; \
a048a07d
NP
 1647 GET_PACA(r13); \
 1648 INTERRUPT_TO_KERNEL;
bc355125
PM
 1649#endif
 1650
fb479e44
NP
 1651#define LOAD_SYSCALL_HANDLER(reg) \
 1652 __LOAD_HANDLER(reg, system_call_common)
d807ad37 1653
acd7d8ce
NP
 1654/*
 1655 * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
 1656 * and HMT_MEDIUM.
 1657 */
/* Real-mode dispatch: rfid into system_call_common with the kernel MSR. */
 1658#define SYSCALL_REAL \
 1659 mfspr r11,SPRN_SRR0 ; \
d807ad37
NP
 1660 mfspr r12,SPRN_SRR1 ; \
 1661 LOAD_SYSCALL_HANDLER(r10) ; \
 1662 mtspr SPRN_SRR0,r10 ; \
 1663 ld r10,PACAKMSR(r13) ; \
 1664 mtspr SPRN_SRR1,r10 ; \
222f20f1 1665 RFI_TO_KERNEL ; \
d807ad37
NP
 1666 b . ; /* prevent speculative execution */
 1667
/* Legacy "sc 0x1ebe" fast endian-switch: flip MSR_LE and return. */
727f1361 1668#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
5c2511bf
ME
 1669#define SYSCALL_FASTENDIAN_TEST \
 1670BEGIN_FTR_SECTION \
 1671 cmpdi r0,0x1ebe ; \
 1672 beq- 1f ; \
 1673END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
 1674
acd7d8ce 1675#define SYSCALL_FASTENDIAN \
d807ad37
NP
 1676 /* Fast LE/BE switch system call */ \
 16771: mfspr r12,SPRN_SRR1 ; \
 1678 xori r12,r12,MSR_LE ; \
 1679 mtspr SPRN_SRR1,r12 ; \
acd7d8ce 1680 mr r13,r9 ; \
222f20f1 1681 RFI_TO_USER ; /* return to userspace */ \
d807ad37 1682 b . ; /* prevent speculative execution */
727f1361
ME
 1683#else
 1684#define SYSCALL_FASTENDIAN_TEST
 1685#define SYSCALL_FASTENDIAN
 1686#endif /* CONFIG_PPC_FAST_ENDIAN_SWITCH */
d807ad37
NP
 1687
/* Virtual-mode dispatch: already translated, just set RI and branch
 * (via CTR when relocatable, since a direct branch may be out of range). */
 1688#if defined(CONFIG_RELOCATABLE)
 1689 /*
 1690 * We can't branch directly so we do it via the CTR which
 1691 * is volatile across system calls.
 1692 */
acd7d8ce
NP
 1693#define SYSCALL_VIRT \
 1694 LOAD_SYSCALL_HANDLER(r10) ; \
 1695 mtctr r10 ; \
 1696 mfspr r11,SPRN_SRR0 ; \
d807ad37
NP
 1697 mfspr r12,SPRN_SRR1 ; \
 1698 li r10,MSR_RI ; \
 1699 mtmsrd r10,1 ; \
 1700 bctr ;
 1701#else
 1702 /* We can branch directly */
acd7d8ce
NP
 1703#define SYSCALL_VIRT \
 1704 mfspr r11,SPRN_SRR0 ; \
d807ad37
NP
 1705 mfspr r12,SPRN_SRR1 ; \
 1706 li r10,MSR_RI ; \
 1707 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
 1708 b system_call_common ;
 1709#endif
1710
/*
 * 0xc00 / 0x4c00 system-call vectors, assembled from the SYSCALL_*
 * macros above. The KVM trampoline (do_kvm_0xc00) recovers r13 from
 * CTR and saves PPR before HMT_MEDIUM, per the hcall convention.
 */
1a6822d1 1711EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
acd7d8ce
NP
 1712 SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
 1713 SYSCALL_FASTENDIAN_TEST
 1714 SYSCALL_REAL
 1715 SYSCALL_FASTENDIAN
1a6822d1 1716EXC_REAL_END(system_call, 0xc00, 0x100)
da2bc464 1717
1a6822d1 1718EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
acd7d8ce
NP
 1719 SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
 1720 SYSCALL_FASTENDIAN_TEST
 1721 SYSCALL_VIRT
 1722 SYSCALL_FASTENDIAN
1a6822d1 1723EXC_VIRT_END(system_call, 0x4c00, 0x100)
d807ad37 1724
acd7d8ce
NP
 1725#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 1726 /*
 1727 * This is a hcall, so register convention is as above, with these
 1728 * differences:
 1729 * r13 = PACA
76fc0cfc
NP
 1730 * ctr = orig r13
 1731 * orig r10 saved in PACA
acd7d8ce
NP
 1732 */
 1733TRAMP_KVM_BEGIN(do_kvm_0xc00)
 1734 /*
 1735 * Save the PPR (on systems that support it) before changing to
 1736 * HMT_MEDIUM. That allows the KVM code to save that value into the
 1737 * guest state (it is the guest's PPR value).
 1738 */
76fc0cfc 1739 OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
acd7d8ce 1740 HMT_MEDIUM
76fc0cfc 1741 OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
acd7d8ce 1742 mfctr r10
76fc0cfc 1743 SET_SCRATCH0(r10)
acd7d8ce
NP
 1744 std r9,PACA_EXGEN+EX_R9(r13)
 1745 mfcr r9
17bdc064 1746 KVM_HANDLER PACA_EXGEN, EXC_STD, 0xc00, 0
acd7d8ce 1747#endif
da2bc464 1748
d807ad37 1749
1a6822d1
NP
/* 0xd00 trace/single-step interrupt: plain vector into single_step_exception. */
 1750EXC_REAL(single_step, 0xd00, 0x100)
 1751EXC_VIRT(single_step, 0x4d00, 0x100, 0xd00)
da2bc464 1752TRAMP_KVM(PACA_EXGEN, 0xd00)
bc6675c6 1753EXC_COMMON(single_step_common, 0xd00, single_step_exception)
b01c8b54 1754
/*
 * 0xe00 hypervisor data storage interrupt. Captures HDAR/HDSISR; on
 * radix this is reported as a SIGSEGV bad page fault, elsewhere it is
 * unexpected (unknown_exception).
 */
1a6822d1 1755EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0x20)
da0e7e62 1756EXC_VIRT_OOL_HV(h_data_storage, 0x4e00, 0x20, 0xe00)
f5c32c1d
NP
 1757TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
 1758EXC_COMMON_BEGIN(h_data_storage_common)
 1759 mfspr r10,SPRN_HDAR
 1760 std r10,PACA_EXGEN+EX_DAR(r13)
 1761 mfspr r10,SPRN_HDSISR
 1762 stw r10,PACA_EXGEN+EX_DSISR(r13)
 1763 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
 1764 bl save_nvgprs
 1765 RECONCILE_IRQ_STATE(r10, r11)
 1766 addi r3,r1,STACK_FRAME_OVERHEAD
d7b45615
SJS
 1767BEGIN_MMU_FTR_SECTION
 1768 ld r4,PACA_EXGEN+EX_DAR(r13)
 1769 lwz r5,PACA_EXGEN+EX_DSISR(r13)
 1770 std r4,_DAR(r1)
 1771 std r5,_DSISR(r1)
 1772 li r5,SIGSEGV
 1773 bl bad_page_fault
 1774MMU_FTR_SECTION_ELSE
f5c32c1d 1775 bl unknown_exception
d7b45615 1776ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
f5c32c1d 1777 b ret_from_except
f5c32c1d 1778
1707dd16 1779
/* 0xe20 hypervisor instruction storage interrupt: not expected, routed
 * to unknown_exception. */
1a6822d1 1780EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0x20)
da0e7e62 1781EXC_VIRT_OOL_HV(h_instr_storage, 0x4e20, 0x20, 0xe20)
82517cab
NP
 1782TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
 1783EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
1784
1707dd16 1785
1a6822d1
NP
/* 0xe40 hypervisor emulation assistance interrupt -> emulation_assist_interrupt. */
 1786EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0x20)
 1787EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x20, 0xe40)
031b4026
NP
 1788TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
 1789EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
1790
1707dd16 1791
e0319829
NP
/*
 * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
 * first, and then eventually from there to the trampoline to get into virtual
 * mode.
 */
__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0x20, hmi_exception_early)
__TRAMP_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60, IRQS_DISABLED)
EXC_VIRT_NONE(0x4e60, 0x20)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)

/*
 * Early (real-mode) part of the HMI handler. Runs on the emergency stack,
 * builds an interrupt frame there and calls hmi_exception_realmode().
 * If that returns 0 we unwind and return from the interrupt directly;
 * otherwise we continue to the virtual-mode handler via
 * tramp_real_hmi_exception.
 */
TRAMP_REAL_BEGIN(hmi_exception_early)
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 1, 0xe60, 0
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	EXCEPTION_PROLOG_COMMON_1()
	/* We don't touch AMR here, we never go to virtual mode */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
	cmpdi	cr0,r3,0		/* r3 != 0: more handling needed in virt mode */

	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	REST_2GPRS(12, r1)
	bne	1f			/* skip the return-from-interrupt path */

	/* r3 was 0: fully handled in real mode, return from the interrupt */
	mtcr	r11
	REST_GPR(11, r1)
	ld	r1,GPR1(r1)
	HRFI_TO_USER_OR_KERNEL

1:	mtcr	r11
	REST_GPR(11, r1)
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0 PACA_EXGEN
	b	tramp_real_hmi_exception
1852
/*
 * Second-stage HMI handler, reached through the virtual-mode trampoline:
 * save the remaining register state and call handle_hmi_exception() in C.
 */
EXC_COMMON_BEGIN(hmi_exception_common)
	EXCEPTION_COMMON(PACA_EXGEN, 0xe60)
	FINISH_NAP
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	RUNLATCH_ON
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	handle_hmi_exception
	b	ret_from_except
1707dd16 1862
f14e953b
MS
/*
 * 0xe80: Hypervisor Doorbell interrupt (maskable). Dispatched to
 * doorbell_exception() when doorbell support is built in, otherwise
 * reported via unknown_exception().
 */
EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
#else
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
#endif


/*
 * 0xea0: Hypervisor Virtualization interrupt (maskable); handled as a
 * regular external interrupt through do_IRQ().
 */
EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0x20, IRQS_DISABLED)
EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x20, 0xea0, IRQS_DISABLED)
TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)


/* 0xec0 and 0xee0 are reserved; no handlers are installed. */
EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)
bda7fea2 1883
0ebc4cda 1884
f442d004
MS
/*
 * 0xf00: Performance Monitor interrupt. Uses IRQS_PMI_DISABLED (a
 * separate soft-mask bit from IRQS_DISABLED used by the other maskable
 * vectors above).
 */
EXC_REAL_OOL_MASKABLE(performance_monitor, 0xf00, 0x20, IRQS_PMI_DISABLED)
EXC_VIRT_OOL_MASKABLE(performance_monitor, 0x4f00, 0x20, 0xf00, IRQS_PMI_DISABLED)
TRAMP_KVM(PACA_EXGEN, 0xf00)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
1889
0ebc4cda 1890
1a6822d1
NP
/*
 * 0xf20: AltiVec Unavailable. When the CPU has AltiVec, load the vector
 * state and return with fast_exception_return; if the faulting task was
 * in a TM transaction, or AltiVec is absent, fall through to the C
 * exception handlers instead.
 */
EXC_REAL_OOL(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x20, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f	/* cr0 set by the prolog; NOTE(review): appears to
			 * skip the register load path — confirm condition */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
	END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except
1926
0ebc4cda 1927
1a6822d1
NP
/*
 * 0xf40: VSX Unavailable. Mirrors the AltiVec case: branch to
 * load_up_vsx when the facility exists and no TM transaction is active,
 * otherwise fall through to the C exception handlers.
 */
EXC_REAL_OOL(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x20, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f	/* cr0 set by the prolog; see altivec case above */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BEGIN_FTR_SECTION_NESTED(69)
	/* Test if 2 TM state bits are zero. If non-zero (ie. userspace was in
	 * transaction), go do TM stuff
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
	END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx		/* tail-call; load_up_vsx returns for us */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except
1962
da2bc464 1963
1a6822d1
NP
/* 0xf60: Facility Unavailable; handled by facility_unavailable_exception(). */
EXC_REAL_OOL(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x20, 0xf60)
TRAMP_KVM(PACA_EXGEN, 0xf60)
EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)


/* 0xf80: Hypervisor Facility Unavailable; shares the same C handler. */
EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x20, 0xf80)
TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)


/* 0xfa0-0xfe0 and 0x1000-0x1100 are reserved; no handlers are installed. */
EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
d0c0c9a1 1987
/*
 * 0x1200: Cell Broadband Engine system error; only wired up when
 * CONFIG_CBE_RAS is enabled, otherwise the vector is left empty.
 */
#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif


/* 0x1300: Instruction Breakpoint. */
EXC_REAL(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT(instruction_breakpoint, 0x5300, 0x100, 0x1300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)

/* 0x1400 is reserved. */
EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)
da2bc464 2006
/*
 * 0x1500: Denormalisation interrupt (HV). If CONFIG_PPC_DENORMALISATION
 * is enabled and HSRR1 flags a denormal operand, branch to the
 * denorm_assist fixup below; otherwise take the normal exception path
 * into denorm_common.
 */
EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0 PACA_EXGEN
	EXCEPTION_PROLOG_1 EXC_HV, PACA_EXGEN, 0, 0x1500, 0

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	bne+	denorm_assist
#endif

	KVMTEST EXC_HV 0x1500
	EXCEPTION_PROLOG_2_REAL denorm_common, EXC_HV, 1
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)

#ifdef CONFIG_PPC_DENORMALISATION
/* Relocation-on variant just redirects to the real-mode entry above. */
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
	b	exc_real_0x1500_denorm_exception_hv
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

TRAMP_KVM_HV(PACA_EXGEN, 0x1500)
b01c8b54 2031
#ifdef CONFIG_PPC_DENORMALISATION
/*
 * Denormal fixup: copying a register to itself forces normalisation of
 * its contents. The per-CPU-generation feature sections below pick which
 * register file(s) to touch, then HSRR0 is wound back by one instruction
 * so the faulting instruction is retried after the hrfid.
 */
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers
 */
	XVCPSGNDP32(32)
denorm_done:
	mfspr	r11,SPRN_HSRR0
	subi	r11,r11,4
	mtspr	SPRN_HSRR0,r11		/* retry the faulting instruction */
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	/* restore the scratch registers saved by the exception prolog */
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFI_TO_UNKNOWN
	b	.
#endif

/* Non-denormal 0x1500 interrupts are unexpected: report as unknown. */
EXC_COMMON(denorm_common, 0x1500, unknown_exception)
d7e89849
NP
2099
2100
/* 0x1600: Cell Broadband Engine maintenance (CONFIG_CBE_RAS only). */
#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif


/*
 * 0x1700: AltiVec Assist; reported via unknown_exception() when AltiVec
 * support is not built in.
 */
EXC_REAL(altivec_assist, 0x1700, 0x100)
EXC_VIRT(altivec_assist, 0x5700, 0x100, 0x1700)
TRAMP_KVM(PACA_EXGEN, 0x1700)
#ifdef CONFIG_ALTIVEC
EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
#else
EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
#endif


/* 0x1800: Cell Broadband Engine thermal (CONFIG_CBE_RAS only). */
#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
#endif
2131
#ifdef CONFIG_PPC_WATCHDOG

#define MASKED_DEC_HANDLER_LABEL 3f

/*
 * Tail of MASKED_INTERRUPT for a masked decrementer when the watchdog is
 * enabled: finish saving the prolog scratch registers and enter
 * soft_nmi_common.
 */
#define MASKED_DEC_HANDLER(_H)				\
3: /* soft-nmi */					\
	std	r12,PACA_EXGEN+EX_R12(r13);		\
	GET_SCRATCH0(r10);				\
	std	r10,PACA_EXGEN+EX_R13(r13);		\
	EXCEPTION_PROLOG_2_REAL soft_nmi_common, _H, 1

/*
 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
 * stack is one that is usable by maskable interrupts so long as MSR_EE
 * remains off. It is used for recovery when something has corrupted the
 * normal kernel stack, for example. The "soft NMI" must not use the process
 * stack because we want irq disabled sections to avoid touching the stack
 * at all (other than PMU interrupts), so use the emergency stack for this,
 * and run it entirely with interrupts hard disabled.
 */
EXC_COMMON_BEGIN(soft_nmi_common)
	mr	r10,r1
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	EXCEPTION_COMMON_STACK(PACA_EXGEN, 0x900)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	soft_nmi_interrupt
	b	ret_from_except

#else /* CONFIG_PPC_WATCHDOG */
#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
#define MASKED_DEC_HANDLER(_H)
#endif /* CONFIG_PPC_WATCHDOG */
d7e89849 2167
/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was a HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR to the paca field.
 */
.macro MASKED_INTERRUPT hsrr
	.if \hsrr
masked_Hinterrupt:
	.else
masked_interrupt:
	.endif
	std	r11,PACA_EXGEN+EX_R11(r13)
	lbz	r11,PACAIRQHAPPENED(r13)
	or	r11,r11,r10
	stb	r11,PACAIRQHAPPENED(r13)	/* remember it for later replay */
	cmpwi	r10,PACA_IRQ_DEC
	bne	1f
	/* decrementer: rearm with the maximum positive value */
	lis	r10,0x7fff
	ori	r10,r10,0xffff
	mtspr	SPRN_DEC,r10
	b	MASKED_DEC_HANDLER_LABEL
1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
	beq	2f
	/* must hard-mask: clear EE in the saved return MSR */
	.if \hsrr
	mfspr	r10,SPRN_HSRR1
	xori	r10,r10,MSR_EE			/* clear MSR_EE */
	mtspr	SPRN_HSRR1,r10
	.else
	mfspr	r10,SPRN_SRR1
	xori	r10,r10,MSR_EE			/* clear MSR_EE */
	mtspr	SPRN_SRR1,r10
	.endif
	ori	r11,r11,PACA_IRQ_HARD_DIS
	stb	r11,PACAIRQHAPPENED(r13)
2:	/* done */
	mtcrf	0x80,r9
	std	r1,PACAR1(r13)
	ld	r9,PACA_EXGEN+EX_R9(r13)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	/* returns to kernel where r13 must be set up, so don't restore it */
	.if \hsrr
	HRFI_TO_KERNEL
	.else
	RFI_TO_KERNEL
	.endif
	b	.
	MASKED_DEC_HANDLER(\hsrr\())
.endm
57f26649 2222
a048a07d
NP
/*
 * Software store-forwarding-barrier sequence: store two GPRs, sync,
 * reload them (a load-hit-store), then run a chain of 14 taken branches.
 * NOTE(review): `ori 31,31,0` here appears to be a special no-op hint
 * form rather than a plain nop — confirm against the STF barrier
 * feature-patching code that selects this fallback.
 */
TRAMP_REAL_BEGIN(stf_barrier_fallback)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	sync
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ori	31,31,0
	.rept 14
	b	1f
1:
	.endr
	blr
2235
aa8a5e00
ME
/*
 * Software L1 data-cache flush performed just before returning with
 * rfid. Walks PACA_RFI_FLUSH_FALLBACK_AREA with displacement loads
 * (PACA_L1D_FLUSH_SIZE bytes, 128-byte lines, 8x unrolled). r1 is
 * stashed in PACA_EXRFI and a known-good kernel stack pointer is used
 * while the flush runs.
 */
TRAMP_REAL_BEGIN(rfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	rfid
2278
/*
 * Same software L1-D flush as rfi_flush_fallback above, but for returns
 * via hrfid (hypervisor SRRs) rather than rfid.
 */
TRAMP_REAL_BEGIN(hrfi_flush_fallback)
	SET_SCRATCH0(r13);
	GET_PACA(r13);
	std	r1,PACA_EXRFI+EX_R12(r13)
	ld	r1,PACAKSAVE(r13)
	std	r9,PACA_EXRFI+EX_R9(r13)
	std	r10,PACA_EXRFI+EX_R10(r13)
	std	r11,PACA_EXRFI+EX_R11(r13)
	mfctr	r9
	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
	mtctr	r11
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

	/* order ld/st prior to dcbt stop all streams with flushing */
	sync

	/*
	 * The load addresses are at staggered offsets within cachelines,
	 * which suits some pipelines better (on others it should not
	 * hurt).
	 */
1:
	ld	r11,(0x80 + 8)*0(r10)
	ld	r11,(0x80 + 8)*1(r10)
	ld	r11,(0x80 + 8)*2(r10)
	ld	r11,(0x80 + 8)*3(r10)
	ld	r11,(0x80 + 8)*4(r10)
	ld	r11,(0x80 + 8)*5(r10)
	ld	r11,(0x80 + 8)*6(r10)
	ld	r11,(0x80 + 8)*7(r10)
	addi	r10,r10,0x80*8
	bdnz	1b

	mtctr	r9
	ld	r9,PACA_EXRFI+EX_R9(r13)
	ld	r10,PACA_EXRFI+EX_R10(r13)
	ld	r11,PACA_EXRFI+EX_R11(r13)
	ld	r1,PACA_EXRFI+EX_R12(r13)
	GET_SCRATCH0(r13);
	hrfid
2321
57f26649
NP
/*
 * Real mode exceptions actually use this too, but alternate
 * instruction code patches (which end up in the common .text area)
 * cannot reach these if they are put there.
 */
USE_FIXED_SECTION(virt_trampolines)
	/* Instantiate masked_interrupt (SRR flavour) and masked_Hinterrupt. */
	MASKED_INTERRUPT EXC_STD
	MASKED_INTERRUPT EXC_HV
0ebc4cda 2330
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
 * KVM helpers that resume execution at the instruction *after* the one
 * that trapped, by advancing SRR0 (or HSRR0) by 4 before returning.
 */
TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	RFI_TO_KERNEL
	b	.

TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	HRFI_TO_KERNEL
	b	.
#endif
2356
/*
 * Ensure that any handlers that get invoked from the exception prologs
 * above are below the first 64KB (0x10000) of the kernel image because
 * the prologs assemble the addresses of these handlers using the
 * LOAD_HANDLER macro, which uses an ori instruction.
 */

/*** Common interrupt handlers ***/


/*
 * Relocation-on interrupts: A subset of the interrupts can be delivered
 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
 * it. Addresses are the same as the original interrupt addresses, but
 * offset by 0xc000000000004000.
 * It's impossible to receive interrupts below 0x300 via this mechanism.
 * KVM: None of these traps are from the guest ; anything that escalated
 * to HV=1 from HV=0 is delivered via real mode handlers.
 */

/*
 * This uses the standard macro, since the original 0x300 vector
 * only has extra guff for STAB-based processors -- which never
 * come here.
 */

/* Trampoline that tail-calls __ppc64_runlatch_on. */
EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
	b	__ppc64_runlatch_on

USE_FIXED_SECTION(virt_trampolines)
	/*
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)
61383407 2398
#ifdef CONFIG_PPC_970_NAP
/*
 * PPC970 nap fixup: clear the bits in r10 from the thread-local flags
 * (presumably the nap flag — set up by the caller; confirm at call site)
 * and rewrite the saved NIP with the saved LR so the interrupted idle
 * loop resumes as if a blr had executed.
 */
EXC_COMMON_BEGIN(power4_fixup_nap)
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif
2407
57f26649
NP
/* All fixed-section content is in place; close the sections and switch
 * back to the regular .text section for the code below. */
CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()
2414
0ebc4cda
BH
/*
 * Hash table stuff
 */
	.balign	IFETCH_ALIGN_BYTES
/*
 * Try to service a data access fault by inserting an HPTE via
 * __hash_page(). On entry r3 = faulting address, r4 = fault bits
 * (DSISR-style), r5 = trap number (see the register comment below).
 * Falls through to handle_page_fault for faults hashing can't service.
 */
do_hash_page:
#ifdef CONFIG_PPC_BOOK3S_64
	lis	r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
	ori	r0,r0,DSISR_BAD_FAULT_64S@l
	and.	r0,r4,r0		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
	ld	r11, PACA_THREAD_INFO(r13)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address
	 * r4 msr
	 * r5 contains the trap number
	 * r6 contains dsisr
	 *
	 * at return r3 = 0 for success, 1 for page fault, negative for error
	 */
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error (negative return): HV refused the insertion */
	blt-	13f

	/* Reload DSISR into r4 for the DABR check below */
	ld	r4,_DSISR(r1)
#endif /* CONFIG_PPC_BOOK3S_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	ret_from_except_lite
	/* do_page_fault failed: report it via bad_page_fault() */
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values. Don't use
	 * ret_from_except_lite here.
	 */
	b	ret_from_except


#ifdef CONFIG_PPC_BOOK3S_64
/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except
#endif

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except
4e2bf01b
ME
2510
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	/* r3 points at the paca exception save area holding the original GPRs */
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)		/* back-chain to the dummy frame above */
	li	r12,0
	std	r12,0(r11)		/* terminate the back-chain */
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b			/* loop forever if it ever returns */
_ASM_NOKPROBE_SYMBOL(bad_stack);
0f0c6ca1 2571
a9af97aa
NP
/*
 * When doorbell is triggered from system reset wakeup, the message is
 * not cleared, so it would fire again when EE is enabled.
 *
 * When coming from local_irq_enable, there may be the same problem if
 * we were hard disabled.
 *
 * Execute msgclr to clear pending exceptions before handling it.
 */
h_doorbell_common_msgclr:
	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
	PPC_MSGCLR(3)
	b	h_doorbell_common

doorbell_super_common_msgclr:
	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
	PPC_MSGCLRP(3)
	b	doorbell_super_common
2590
0f0c6ca1
NP
/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains the vector to replay: 0x500, 0x900, 0xa00,
 * 0xe60 or 0xf00 (see the cmpwi chain below). MSR:EE is already off.
 * We generate a stackframe like if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1 or the exception
 * handler will not properly re-enable them.
 *
 * Note that we don't specify LR as the NIP (return address) for
 * the interrupt because that would unbalance the return branch
 * predictor.
 */
_GLOBAL(__replay_interrupt)
	/* We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	LOAD_REG_ADDR(r11, replay_interrupt_return)
	mfcr	r9
	ori	r12,r12,MSR_EE		/* frame _MSR must show EE on (see above) */
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
BEGIN_FTR_SECTION
	beq	h_virt_irq_common
FTR_SECTION_ELSE
	beq	hardware_interrupt_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
	cmpwi	r3,0xf00
	beq	performance_monitor_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xa00
	beq	h_doorbell_common_msgclr
	cmpwi	r3,0xe60
	beq	hmi_exception_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common_msgclr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
replay_interrupt_return:
	blr

_ASM_NOKPROBE_SYMBOL(__replay_interrupt)