s390/bp: add missing BPENTER to program check handler
arch/s390/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *		 Hartmut Penner (hp@de.ibm.com),
 *		 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET	= __LC_LPP

	.macro	STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193
	.endm

	.macro	LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193
	.endm

	.macro	LPSWEY address,lpswe
	ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193
	.endm

	.macro MBEAR reg
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193
	.endm
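
/*
 * Rough reading of the four macros above (a sketch, not spelled out in
 * the source itself): all of them are patched via ALTERNATIVE on
 * facility 193 (BEAR enhancement). Without the facility, STBEAR, LBEAR
 * and MBEAR degenerate to nops and LPSWEY simply branches to the \lpswe
 * code; with it, they store/load the breaking-event-address register,
 * copy __LC_LAST_BREAK into the pt_regs last-break field, and return
 * via the LPSWEY instruction on the given save area.
 */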

	.macro	CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
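
/*
 * Worked example, assuming for illustration STACK_SIZE = 16KB and
 * CONFIG_STACK_GUARD = 256 (both depend on the configuration): the tml
 * mask is then 0x3f00, so condition code 0 ("jz") means the offset of
 * %r15 within its stack is below 256 bytes, i.e. %r15 points into the
 * guard area at the bottom of the stack, and we branch to
 * stack_overflow with the save area offset preloaded in %r14.
 */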

	.macro	CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - STACK_SIZE
	oill	%r14,STACK_INIT
	clg	%r14,__LC_KERNEL_STACK
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK
	je	\oklabel
	lghi	%r14,\savearea
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm
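
/*
 * In other words (a sketch, assuming STACK_SIZE is a power of two no
 * larger than 64KB so that nill on the low halfword suffices):
 * CHECK_VMAP_STACK rounds %r15 down to the base of its stack, rebuilds
 * the expected initial stack pointer by OR-ing in STACK_INIT, and only
 * accepts %r15 if that value matches one of the known kernel, async,
 * machine check, nodat or restart stacks; otherwise it branches to
 * stack_overflow with the save area offset in %r14.
 */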

	/*
	 * The TSTMSK macro generates a test-under-mask instruction by
	 * calculating the memory offset for the specified mask value.
	 * Mask value can be any constant. The macro shifts the mask
	 * value to calculate the memory offset for the test-under-mask
	 * instruction.
	 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error "Mask exceeds byte boundary"
	.endif
	TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
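
	/*
	 * For illustration: with the default size of 8,
	 * TSTMSK __LC_MCCK_CODE,0x0100 recurses once with the mask
	 * shifted right by 8 and bytepos incremented, and finally emits
	 * the equivalent of "tm 6+__LC_MCCK_CODE,1", i.e. it tests the
	 * 0x01 bit of byte 6 of the big-endian 8-byte field, which is
	 * exactly the 0x0100 bit of the quadword.
	 */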

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", 82
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82
	.endm

#if IS_ENABLED(CONFIG_KVM)
	/*
	 * The OUTSIDE macro jumps to the provided label in case the value
	 * in the provided register is outside of the provided range. The
	 * macro is useful for checking whether a PSW stored in a register
	 * pair points inside or outside of a block of instructions.
	 * @reg: register to check
	 * @start: start of the range
	 * @end: end of the range
	 * @outside_label: jump here if @reg is outside of [@start..@end)
	 */
	.macro OUTSIDE reg,start,end,outside_label
	lgr	%r14,\reg
	larl	%r13,\start
	slgr	%r14,%r13
	clgfrl	%r14,.Lrange_size\@
	jhe	\outside_label
	.section .rodata, "a"
	.align 4
.Lrange_size\@:
	.long	\end - \start
	.previous
	.endm
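
	/*
	 * For illustration: an invocation such as
	 * "OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f" computes
	 * %r14 = %r9 - .Lsie_gmap and branches to 1f when that unsigned
	 * difference is >= .Lsie_done - .Lsie_gmap, i.e. when the saved
	 * PSW address lies outside [.Lsie_gmap, .Lsie_done). Note that
	 * %r13 and %r14 are clobbered in the process.
	 */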

	.macro SIEEXIT
	lg	%r9,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * This nop exists only to prevent __bpon from starting right at
	 * the beginning of the kprobes text section. Otherwise several
	 * symbols would share the same address and e.g. objdump would
	 * pick an arbitrary one of them when disassembling this code.
	 * With the nop in between, the __bpon symbol is unique again.
	 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r0),0(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	BR_EX	%r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 */
ENTRY(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
	sie	0(%r14)
# Let the next instruction be a NOP to avoid triggering a machine check
# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE	# load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where the ILC is
# unpredictable. Other instructions between __sie64a and .Lsie_done should not
# cause program interrupts. So let's use 3 nops as a landing pad for all
# possible rewinds: rewinding the PSW at sie_exit by 2, 4 or 6 bytes lands on
# one of the pads below, each of which is covered by an exception table entry
# pointing to .Lsie_fault.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

ENTRY(system_call)
	stpt	__LC_SYS_ENTER_TIMER
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r12,__LC_CURRENT
	lg	%r15,__LC_KERNEL_STACK
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
	MBEAR	%r2
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	lctlg	%c1,%c1,__LC_USER_ASCE
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	stpt	__LC_EXIT_TIMER
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
	stpt	__LC_SYS_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r12,__LC_CURRENT
	lghi	%r10,0
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for program checks in __sie64a
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,1f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	lghi	%r10,_PIF_GUEST_FAULT
#endif
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stg	%r10,__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK
	stmg	%r8,%r9,__PT_PSW(%r11)

	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
	stckf	__LC_INT_CLOCK
	stpt	__LC_SYS_ENTER_TIMER
	STBEAR	__LC_LAST_BREAK
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,\lc_old_psw
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,0f
	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
#endif
0:	CHECK_STACK __LC_SAVE_AREA_ASYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	lg	%r15,__LC_KERNEL_STACK
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	MBEAR	%r11
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	lctlg	%c1,%c1,__LC_USER_ASCE
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
ENTRY(psw_idle)
	stg	%r14,(__SF_GPRS+8*8)(%r15)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,psw_idle_exit
	stg	%r1,__SF_EMPTY+8(%r15)
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	stckf	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
	lpswe	__SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
	BR_EX	%r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	stckf	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# validate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# validate cpu timer
	LBEAR	__LC_LAST_BREAK_SAVE_AREA-4095(%r1)	# validate bear
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # validate gprs
	lg	%r12,__LC_CURRENT
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	la	%r14,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
	ptlb
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER
	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	OUTSIDE	%r9,.Lsie_gmap,.Lsie_done,.Lmcck_stack
	OUTSIDE	%r9,.Lsie_entry,.Lsie_leave,4f
	oi	__LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	SIEEXIT
	j	.Lmcck_stack
#endif
.Lmcck_user:
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
	lg	%r15,__LC_MCCK_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
0:	ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
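	/*
	 * A rough pseudo-C sketch of the loop below (stap, sigp and
	 * cmpxchg used loosely, for illustration only):
	 *
	 *	if (cmpxchg(&stop_lock, 0, 1) == 0) {
	 *		this_cpu = stap();
	 *		for (cpu = 0; cpu < 0x10000; cpu++)
	 *			if (cpu != this_cpu)
	 *				sigp(cpu, SIGP_STOP); // retry while busy
	 *		sigp(this_cpu, SIGP_STOP);            // retry while busy
	 *	}
	 *	for (;;) ;                                    // wait to be stopped
	 */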
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,.Lstop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,.Lthis_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
ENDPROC(mcck_int_handler)

ENTRY(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	la	%r15,4095
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:	larl	%r15,.Lstosm_tmp
	stosm	0(%r15),0x04			# turn dat on, keep irqs off
	lg	%r15,__LC_RESTART_STACK
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lgf	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
ENDPROC(restart_int_handler)

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
ENTRY(stack_overflow)
	lg	%r15,__LC_NODAT_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

	.section .data, "aw"
	.align	4
.Lstop_lock:	.long	0
.Lthis_cpu:	.short	0
.Lstosm_tmp:	.byte	0
	.section .rodata, "a"
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
	.globl	sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL
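
/*
 * Note: asm/syscall_table.h is expected to expand to one SYSCALL(...)
 * invocation per system call number, so each include emits one .quad
 * entry. Redefining SYSCALL before the compat include below selects the
 * __s390_ (31-bit emulation) entry points instead of the native
 * __s390x_ ones.
 */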

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif