/* blktrace: remove unnecessary stop block trace in 'blk_trace_shutdown' */
/* [linux-block.git] / arch / powerpc / kernel / interrupt_64.S */
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
15
16         .align 7
17
18 .macro DEBUG_SRR_VALID srr
19 #ifdef CONFIG_PPC_RFI_SRR_DEBUG
20         .ifc \srr,srr
21         mfspr   r11,SPRN_SRR0
22         ld      r12,_NIP(r1)
23         clrrdi  r11,r11,2
24         clrrdi  r12,r12,2
25 100:    tdne    r11,r12
26         EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
27         mfspr   r11,SPRN_SRR1
28         ld      r12,_MSR(r1)
29 100:    tdne    r11,r12
30         EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
31         .else
32         mfspr   r11,SPRN_HSRR0
33         ld      r12,_NIP(r1)
34         clrrdi  r11,r11,2
35         clrrdi  r12,r12,2
36 100:    tdne    r11,r12
37         EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
38         mfspr   r11,SPRN_HSRR1
39         ld      r12,_MSR(r1)
40 100:    tdne    r11,r12
41         EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
42         .endif
43 #endif
44 .endm

#ifdef CONFIG_PPC_BOOK3S
/*
 * system_call_vectored name trapnr
 *
 * Entry for system calls made with the scv instruction (Book3S only).
 * Builds a pt_regs frame on the kernel stack (PACAKSAVE), saves syscall
 * args r3-r8 plus NVGPRs, records \trapnr as the trap value, and calls
 * system_call_exception(regs, orig_r0). On return, syscall_exit_prepare
 * decides (via r3) between the fast rfscv exit and the full-register
 * RFI exit. Instantiated twice below: "common" (0x3000) and "sigill"
 * (0x7ff0).
 */
.macro system_call_vectored name trapnr
	.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
	SCV_INTERRUPT_TO_KERNEL
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_LINK(r1)
	std	r11,_CTR(r1)

	li	r11,\trapnr
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

BEGIN_FTR_SECTION
	HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
	 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
	 * and interrupts may be masked and pending already.
	 * system_call_exception() will call trace_hardirqs_off() which means
	 * interrupts could already have been blocked before trace_hardirqs_off,
	 * but this is the best we can do.
	 */

	bl	system_call_exception

.Lsyscall_vectored_\name\()_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,1 /* scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_vectored_\name\()_restart
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* r3 != 0 from syscall_exit_prepare means restore all registers */
	cmpdi	r3,0
	bne	.Lsyscall_vectored_\name\()_restore_regs

	/* rfscv returns with LR->NIA and CTR->MSR */
	mtlr	r4
	mtctr	r5

	/* Could zero these as per ABI, but we may consider a stricter ABI
	 * which preserves these if libc implementations can benefit, so
	 * restore them for now until further measurement is done. */
	REST_GPR(0, r1)
	REST_GPRS(4, 8, r1)
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPRS(9, 12)
	mtspr	SPRN_XER,r0

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFSCV_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5

	ld	r3,_CTR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_XER(r1)

	REST_NVGPRS(r1)
	REST_GPR(0, r1)
	mtcr	r2
	mtctr	r3
	mtlr	r4
	mtspr	SPRN_XER,r5
	REST_GPRS(2, 13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

/*
 * System call entry for the sc instruction (trap 0xc00 is stored in _TRAP
 * below). system_call_common_real first switches to the kernel MSR
 * (PACAKMSR) and falls through into system_call_common, which builds the
 * pt_regs frame, clears CR0.SO (the syscall error indication), and calls
 * system_call_exception(regs, orig_r0). The exit path at .Lsyscall_exit
 * either takes the fast path (volatile regs zeroized) or restores all
 * registers, then returns with RFI_TO_USER.
 */
	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	mtmsrd	r10

	.balign IFETCH_ALIGN_BYTES
	.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
	mr	r10,r1
	ld	r1,PACAKSAVE(r13)
	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	std	r2,GPR2(r1)
#ifdef CONFIG_PPC_E500
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	LOAD_PACA_TOC()
	mfcr	r12
	li	r11,0
	/* Save syscall parameters in r3-r8 */
	SAVE_GPRS(3, 8, r1)
	/* Zero r9-r12, this should only be required when restoring all GPRs */
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	SAVE_NVGPRS(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	mflr	r10

	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r12,r11,28,(63-28)
	li	r11,0xc00
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r12,_CCR(r1)
	std	r3,ORIG_GPR3(r1)
	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r11,-16(r3)		/* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
	li	r11,1
	stb	r11,PACASRR_VALID(r13)
#endif

	/*
	 * We always enter kernel from userspace with irq soft-mask enabled and
	 * nothing pending. system_call_exception() will call
	 * trace_hardirqs_off().
	 */
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
	li	r12,-1 /* Set MSR_EE and MSR_RI */
	mtmsrd	r12,1
#else
	wrteei	1
#endif

	bl	system_call_exception

.Lsyscall_exit:
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r5,0 /* !scv */
	bl	syscall_exit_prepare
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	syscall_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

	ld	r2,_CCR(r1)
	ld	r6,_LINK(r1)
	mtlr	r6

#ifdef CONFIG_PPC_BOOK3S
	/* If SRRs already hold the right values, skip reloading them */
	lbz	r4,PACASRR_VALID(r13)
	cmpdi	r4,0
	bne	1f
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
	ld	r4,_NIP(r1)
	ld	r5,_MSR(r1)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r5
1:
	DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/* r3 != 0 from syscall_exit_prepare means restore all registers */
	cmpdi	r3,0
	bne	.Lsyscall_restore_regs
	/* Zero volatile regs that may contain sensitive kernel data */
	ZEROIZE_GPR(0)
	ZEROIZE_GPRS(4, 12)
	mtctr	r0
	mtspr	SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtcr	r2
	REST_GPRS(2, 3, r1)
	REST_GPR(13, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

.Lsyscall_restore_regs:
	ld	r3,_CTR(r1)
	ld	r4,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r3
	mtspr	SPRN_XER,r4
	REST_GPR(0, r1)
	REST_GPRS(4, 12, r1)
	b	.Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	ld	r3,RESULT(r1)
	addi	r4,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
371
372         /*
373          * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
374          * touched, no exit work created, then this can be used.
375          */
376         .balign IFETCH_ALIGN_BYTES
377         .globl fast_interrupt_return_srr
378 fast_interrupt_return_srr:
379 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
380         kuap_check_amr r3, r4
381         ld      r5,_MSR(r1)
382         andi.   r0,r5,MSR_PR
383 #ifdef CONFIG_PPC_BOOK3S
384         beq     1f
385         kuap_user_restore r3, r4
386         b       .Lfast_user_interrupt_return_srr
387 1:      kuap_kernel_restore r3, r4
388         andi.   r0,r5,MSR_RI
389         li      r3,0 /* 0 return value, no EMULATE_STACK_STORE */
390         bne+    .Lfast_kernel_interrupt_return_srr
391         addi    r3,r1,STACK_FRAME_OVERHEAD
392         bl      unrecoverable_exception
393         b       . /* should not get here */
394 #else
395         bne     .Lfast_user_interrupt_return_srr
396         b       .Lfast_kernel_interrupt_return_srr
397 #endif

/*
 * interrupt_return_macro srr|hsrr
 *
 * Generates interrupt_return_(srr|hsrr) with separate _user and _kernel
 * exit paths, selected on MSR_PR from the saved _MSR. The \srr argument
 * selects SRR0/SRR1 vs HSRR0/HSRR1 and RFI vs HRFI return flavors.
 * Instantiated below for srr (and hsrr on Book3S).
 */
.macro interrupt_return_macro srr
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_user_prepare
	/* r3 != 0 from interrupt_exit_user_prepare means restore NVGPRs */
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_user_restart
#endif
	li	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	lbz	r4,PACAIRQSOFTMASK(r13)
	tdnei	r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	REST_GPRS(7, 13, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_USER
	.else
	HRFI_TO_USER
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
	REST_NVGPRS(r1)
	b	.Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

	.balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
	ld	r11,SOFTE(r1)
	cmpwi	r11,IRQS_ENABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	beq	.Linterrupt_return_\srr\()_soft_enabled

	/*
	 * Returning to soft-disabled context.
	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
	 * case we need to disable MSR[EE] in the return context.
	 */
	ld	r12,_MSR(r1)
	andi.	r10,r12,MSR_EE
	beq	.Lfast_kernel_interrupt_return_\srr\() // EE already disabled
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
	beq	1f // No HARD_MASK pending

	/* Must clear MSR_EE from _MSR */
#ifdef CONFIG_PPC_BOOK3S
	li	r10,0
	/* Clear valid before changing _MSR */
	.ifc \srr,srr
	stb	r10,PACASRR_VALID(r13)
	.else
	stb	r10,PACAHSRR_VALID(r13)
	.endif
#endif
	xori	r12,r12,MSR_EE
	std	r12,_MSR(r1)
	b	.Lfast_kernel_interrupt_return_\srr\()

.Linterrupt_return_\srr\()_soft_enabled:
#ifdef CONFIG_PPC_BOOK3S
	lbz	r11,PACAIRQHAPPENED(r13)
	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
	bne-	interrupt_return_\srr\()_kernel_restart
#endif
1:
	li	r11,0
	stb	r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS

.Lfast_kernel_interrupt_return_\srr\():
	cmpdi	cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
	.ifc \srr,srr
	lbz	r4,PACASRR_VALID(r13)
	.else
	lbz	r4,PACAHSRR_VALID(r13)
	.endif
	cmpdi	r4,0
	li	r4,0
	bne	1f
#endif
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	.ifc \srr,srr
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACASRR_VALID(r13)
#endif
	.else
	mtspr	SPRN_HSRR0,r11
	mtspr	SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
	stb	r4,PACAHSRR_VALID(r13)
#endif
	.endif
	DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	.ifc \srr,srr
	RFI_TO_KERNEL
	.else
	HRFI_TO_KERNEL
	.endif
	b	.	/* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
	GET_PACA(r13)
	ld	r1,PACA_EXIT_SAVE_R1(r13)
	LOAD_PACA_TOC()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r11,IRQS_ALL_DISABLED
	stb	r11,PACAIRQSOFTMASK(r13)
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

/* Instantiate the return paths; hsrr variant exists only on Book3S. */
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

	.globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
/*
 * Child return path for a fork made via an scv system call: run
 * schedule_tail, restore NVGPRs, return 0 through the scv exit path.
 */
_GLOBAL(ret_from_fork_scv)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_vectored_common_exit
#endif

/*
 * Child return path for a fork made via the sc system call: run
 * schedule_tail, restore NVGPRs, return 0 through the sc exit path.
 */
_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0	/* fork() return value */
	b	.Lsyscall_exit

/*
 * First-run path for a kernel thread: after schedule_tail, call the
 * thread function held in r14 with the argument held in r15 (r12 is
 * also loaded with the function address for the ELFv2 ABI), then exit
 * through the syscall return path with a 0 result.
 */
_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtctr	r14
	mr	r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
	mr	r12,r14
#endif
	bctrl
	li	r3,0
	b	.Lsyscall_exit