Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 | 2 | /* |
1da177e4 LT | 3 | * S390 low-level entry points. |
4 | * | |
a53c8fab | 5 | * Copyright IBM Corp. 1999, 2012 |
1da177e4 | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
25d83cbf HC | 7 | * Hartmut Penner (hp@de.ibm.com), |
8 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | |
1da177e4 LT | 9 | */ |
10 | ||
b8c723f1 | 11 | #include <linux/export.h> |
2bc89b5e | 12 | #include <linux/init.h> |
144d634a | 13 | #include <linux/linkage.h> |
d09a307f | 14 | #include <asm/asm-extable.h> |
b058661a | 15 | #include <asm/alternative-asm.h> |
eb608fb3 | 16 | #include <asm/processor.h> |
1da177e4 | 17 | #include <asm/cache.h> |
dc24b7b4 | 18 | #include <asm/dwarf.h> |
1da177e4 LT | 19 | #include <asm/errno.h> |
20 | #include <asm/ptrace.h> | |
21 | #include <asm/thread_info.h> | |
0013a854 | 22 | #include <asm/asm-offsets.h> |
1da177e4 LT | 23 | #include <asm/unistd.h> |
24 | #include <asm/page.h> | |
eb546195 | 25 | #include <asm/sigp.h> |
1f44a225 | 26 | #include <asm/irq.h> |
fd2527f2 | 27 | #include <asm/fpu-insn.h> |
83abeffb HB | 28 | #include <asm/setup.h> |
29 | #include <asm/nmi.h> | |
6dd85fbb | 30 | #include <asm/nospec-insn.h> |
1da177e4 | 31 | |
e5b98199 MS | 32 | _LPP_OFFSET = __LC_LPP |
33 | ||
3b051e89 | 34 | .macro STBEAR address |
fad442d3 | 35 | ALTERNATIVE "nop", ".insn s,0xb2010000,\address", 193 |
3b051e89 SS | 36 | .endm |
37 | ||
38 | .macro LBEAR address | |
fad442d3 | 39 | ALTERNATIVE "nop", ".insn s,0xb2000000,\address", 193 |
3b051e89 SS | 40 | .endm |
41 | ||
42 | .macro LPSWEY address,lpswe | |
fad442d3 | 43 | ALTERNATIVE "b \lpswe; nopr", ".insn siy,0xeb0000000071,\address,0", 193 |
3b051e89 SS | 44 | .endm |
45 | ||
46 | .macro MBEAR reg | |
fad442d3 | 47 | ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK), 193 |
3b051e89 SS | 48 | .endm |
49 | ||
ce3dc447 | 50 | .macro CHECK_STACK savearea |
63b12246 | 51 | #ifdef CONFIG_CHECK_STACK |
c2c3258f | 52 | tml %r15,THREAD_SIZE - CONFIG_STACK_GUARD |
c5328901 MS | 53 | lghi %r14,\savearea |
54 | jz stack_overflow | |
63b12246 | 55 | #endif |
63b12246 MS | 56 | .endm |
57 | ||
ce3dc447 MS | 58 | .macro CHECK_VMAP_STACK savearea,oklabel |
59 | #ifdef CONFIG_VMAP_STACK | |
60 | lgr %r14,%r15 | |
c2c3258f HC | 61 | nill %r14,0x10000 - THREAD_SIZE |
62 | oill %r14,STACK_INIT_OFFSET | |
ce3dc447 MS | 63 | clg %r14,__LC_KERNEL_STACK |
64 | je \oklabel | |
65 | clg %r14,__LC_ASYNC_STACK | |
66 | je \oklabel | |
b61b1595 SS | 67 | clg %r14,__LC_MCCK_STACK |
68 | je \oklabel | |
ce3dc447 MS | 69 | clg %r14,__LC_NODAT_STACK |
70 | je \oklabel | |
71 | clg %r14,__LC_RESTART_STACK | |
72 | je \oklabel | |
73 | lghi %r14,\savearea | |
74 | j stack_overflow | |
75 | #else | |
76 | j \oklabel | |
77 | #endif | |
78 | .endm | |
79 | ||
83abeffb HB | 80 | /* |
81 | * The TSTMSK macro generates a test-under-mask instruction by | |
82 | * calculating the memory offset for the specified mask value. | |
83 | * Mask value can be any constant. The macro shifts the mask | |
84 | * value to calculate the memory offset for the test-under-mask | |
85 | * instruction. | |
86 | */ | |
87 | .macro TSTMSK addr, mask, size=8, bytepos=0 | |
88 | .if (\bytepos < \size) && (\mask >> 8) | |
89 | .if (\mask & 0xff) | |
90 | .error "Mask exceeds byte boundary" | |
91 | .endif | |
92 | TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" | |
93 | .exitm | |
94 | .endif | |
95 | .ifeq \mask | |
96 | .error "Mask must not be zero" | |
97 | .endif | |
98 | off = \size - \bytepos - 1 | |
99 | tm off+\addr, \mask | |
100 | .endm | |
101 | ||
d768bd89 | 102 | .macro BPOFF |
fad442d3 | 103 | ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", 82 |
d768bd89 MS | 104 | .endm |
105 | ||
106 | .macro BPON | |
fad442d3 | 107 | ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", 82 |
d768bd89 MS | 108 | .endm |
109 | ||
6b73044b | 110 | .macro BPENTER tif_ptr,tif_mask |
6982dba1 | 111 | ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \ |
fad442d3 | 112 | "j .+12; nop; nop", 82 |
6b73044b MS | 113 | .endm |
114 | ||
115 | .macro BPEXIT tif_ptr,tif_mask | |
116 | TSTMSK \tif_ptr,\tif_mask | |
6982dba1 HC | 117 | ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \ |
118 | "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", 82 | |
6b73044b MS | 119 | .endm |
120 | ||
b5415c8f | 121 | #if IS_ENABLED(CONFIG_KVM) |
64c34318 SS | 122 | .macro SIEEXIT sie_control |
123 | lg %r9,\sie_control # get control block pointer | |
fbbdfca5 AG |
124 | ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE |
125 | lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce | |
c239c83e | 126 | ni __LC_CPU_FLAGS+7,255-_CIF_SIE |
fbbdfca5 AG | 127 | larl %r9,sie_exit # skip forward to sie_exit |
128 | .endm | |
b5415c8f AG |
129 | #endif |
130 | ||
b94c0ebb HC |
131 | .macro STACKLEAK_ERASE |
132 | #ifdef CONFIG_GCC_PLUGIN_STACKLEAK | |
133 | brasl %r14,stackleak_erase_on_task_stack | |
134 | #endif | |
135 | .endm | |
136 | ||
6dd85fbb | 137 | GEN_BR_THUNK %r14 |
f19fbd5e | 138 | |
860dba45 | 139 | .section .kprobes.text, "ax" |
46210c44 HC | 140 | .Ldummy: |
141 | /* | |
69a407bf HC |
142 | * The following nop exists only in order to avoid that the next |
143 | * symbol starts at the beginning of the kprobes text section. | |
144 | * In that case there would be several symbols at the same address. | |
145 | * E.g. objdump would take an arbitrary symbol when disassembling | |
146 | * the code. | |
147 | * With the added nop in between this cannot happen. | |
46210c44 HC |
148 | */ |
149 | nop 0 | |
860dba45 | 150 | |
1da177e4 | 151 | /* |
340750c1 HC | 152 | * Scheduler resume function, called by __switch_to |
153 | * gpr2 = (task_struct *)prev | |
154 | * gpr3 = (task_struct *)next | |
1da177e4 LT |
155 | * Returns: |
156 | * gpr2 = prev | |
157 | */ | |
340750c1 | 158 | SYM_FUNC_START(__switch_to_asm) |
eda0c6d6 | 159 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task |
3241d3eb HC | 160 | lghi %r4,__TASK_stack |
161 | lghi %r1,__TASK_thread | |
c2c3258f | 162 | llill %r5,STACK_INIT_OFFSET |
3241d3eb | 163 | stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev |
9fed920e VG |
164 | lg %r15,0(%r4,%r3) # start of kernel stack of next |
165 | agr %r15,%r5 # end of kernel stack of next | |
eda0c6d6 | 166 | stg %r3,__LC_CURRENT # store task struct of next |
eda0c6d6 | 167 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack |
3241d3eb HC | 168 | lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next |
169 | aghi %r3,__TASK_pid | |
170 | mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next | |
d3a73acb | 171 | lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task |
fad442d3 | 172 | ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 |
6dd85fbb | 173 | BR_EX %r14 |
340750c1 | 174 | SYM_FUNC_END(__switch_to_asm) |
1da177e4 | 175 | |
d0fc4107 MS |
176 | #if IS_ENABLED(CONFIG_KVM) |
177 | /* | |
6b33e68a NB | 178 | * __sie64a calling convention: |
179 | * %r2 pointer to sie control block phys | |
180 | * %r3 pointer to sie control block virt | |
181 | * %r4 guest register save area | |
d0fc4107 | 182 | */ |
fda1dffa | 183 | SYM_FUNC_START(__sie64a) |
d0fc4107 | 184 | stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers |
6b73044b | 185 | lg %r12,__LC_CURRENT |
6b33e68a NB | 186 | stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical.. |
187 | stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses | |
188 | stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area | |
92fa7a13 MS |
189 | xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0 |
190 | mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags | |
6b33e68a | 191 | lmg %r0,%r13,0(%r4) # load guest gprs 0-13 |
d0fc4107 MS |
192 | lg %r14,__LC_GMAP # get gmap pointer |
193 | ltgr %r14,%r14 | |
194 | jz .Lsie_gmap | |
c239c83e | 195 | oi __LC_CPU_FLAGS+7,_CIF_SIE |
d0fc4107 MS | 196 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce |
197 | .Lsie_gmap: | |
92fa7a13 | 198 | lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer |
d0fc4107 MS |
199 | oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now |
200 | tm __SIE_PROG20+3(%r14),3 # last exit... | |
201 | jnz .Lsie_skip | |
6b33e68a | 202 | lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr |
f33f2d4c | 203 | BPEXIT __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST |
c929500d | 204 | .Lsie_entry: |
d0fc4107 | 205 | sie 0(%r14) |
29ccaa4b AG |
206 | # Let the next instruction be NOP to avoid triggering a machine check |
207 | # and handling it in a guest as result of the instruction execution. | |
208 | nopr 7 | |
209 | .Lsie_leave: | |
d768bd89 | 210 | BPOFF |
f33f2d4c | 211 | BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST |
d0fc4107 | 212 | .Lsie_skip: |
6b33e68a | 213 | lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer |
d0fc4107 | 214 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE |
87d59863 | 215 | lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce |
c239c83e | 216 | ni __LC_CPU_FLAGS+7,255-_CIF_SIE |
d0fc4107 | 217 | # some program checks are suppressing. C code (e.g. do_protection_exception) |
c0e7bb38 CB |
218 | # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There |
219 | # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. | |
6b33e68a | 220 | # Other instructions between __sie64a and .Lsie_done should not cause program |
c0e7bb38 | 221 | # interrupts. So lets use 3 nops as a landing pad for all possible rewinds. |
c0e7bb38 CB |
222 | .Lrewind_pad6: |
223 | nopr 7 | |
224 | .Lrewind_pad4: | |
225 | nopr 7 | |
226 | .Lrewind_pad2: | |
227 | nopr 7 | |
fda1dffa | 228 | SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL) |
92fa7a13 | 229 | lg %r14,__SF_SIE_SAVEAREA(%r15) # load guest register save area |
d0fc4107 | 230 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 |
7041d281 MS |
231 | xgr %r0,%r0 # clear guest registers to |
232 | xgr %r1,%r1 # prevent speculative use | |
7041d281 MS |
233 | xgr %r3,%r3 |
234 | xgr %r4,%r4 | |
235 | xgr %r5,%r5 | |
d0fc4107 | 236 | lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers |
92fa7a13 | 237 | lg %r2,__SF_SIE_REASON(%r15) # return exit reason code |
6dd85fbb | 238 | BR_EX %r14 |
d0fc4107 MS |
239 | .Lsie_fault: |
240 | lghi %r14,-EFAULT | |
92fa7a13 | 241 | stg %r14,__SF_SIE_REASON(%r15) # set exit reason code |
d0fc4107 MS |
242 | j sie_exit |
243 | ||
c0e7bb38 CB |
244 | EX_TABLE(.Lrewind_pad6,.Lsie_fault) |
245 | EX_TABLE(.Lrewind_pad4,.Lsie_fault) | |
246 | EX_TABLE(.Lrewind_pad2,.Lsie_fault) | |
d0fc4107 | 247 | EX_TABLE(sie_exit,.Lsie_fault) |
fda1dffa | 248 | SYM_FUNC_END(__sie64a) |
6b33e68a | 249 | EXPORT_SYMBOL(__sie64a) |
711f5df7 | 250 | EXPORT_SYMBOL(sie_exit) |
d0fc4107 MS |
251 | #endif |
252 | ||
1da177e4 LT |
253 | /* |
254 | * SVC interrupt handler routine. System calls are synchronous events and | |
7b7735c5 | 255 | * are entered with interrupts disabled. |
1da177e4 LT |
256 | */ |
257 | ||
fda1dffa | 258 | SYM_CODE_START(system_call) |
56e62a73 | 259 | stpt __LC_SYS_ENTER_TIMER |
c5328901 | 260 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
d768bd89 | 261 | BPOFF |
56e62a73 | 262 | lghi %r14,0 |
86ed42f4 | 263 | .Lsysc_per: |
3b051e89 | 264 | STBEAR __LC_LAST_BREAK |
87d59863 | 265 | lctlg %c1,%c1,__LC_KERNEL_ASCE |
c5328901 | 266 | lg %r15,__LC_KERNEL_STACK |
9365965d | 267 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
56e62a73 | 268 | stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) |
d3f46896 CB |
269 | # clear user controlled register to prevent speculative use |
270 | xgr %r0,%r0 | |
56e62a73 SS |
271 | xgr %r1,%r1 |
272 | xgr %r4,%r4 | |
273 | xgr %r5,%r5 | |
274 | xgr %r6,%r6 | |
275 | xgr %r7,%r7 | |
276 | xgr %r8,%r8 | |
277 | xgr %r9,%r9 | |
278 | xgr %r10,%r10 | |
279 | xgr %r11,%r11 | |
280 | la %r2,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs | |
af9ad822 | 281 | mvc __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC |
3b051e89 | 282 | MBEAR %r2 |
56e62a73 SS |
283 | lgr %r3,%r14 |
284 | brasl %r14,__do_syscall | |
b94c0ebb | 285 | STACKLEAK_ERASE |
87d59863 | 286 | lctlg %c1,%c1,__LC_USER_ASCE |
56e62a73 | 287 | mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) |
f33f2d4c | 288 | BPON |
3b051e89 | 289 | LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) |
56e62a73 | 290 | lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) |
c5328901 | 291 | stpt __LC_EXIT_TIMER |
3b051e89 | 292 | LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE |
fda1dffa | 293 | SYM_CODE_END(system_call) |
1da177e4 LT |
294 | |
295 | # | |
296 | # a new process exits the kernel with ret_from_fork | |
297 | # | |
fda1dffa | 298 | SYM_CODE_START(ret_from_fork) |
56e62a73 SS |
299 | lgr %r3,%r11 |
300 | brasl %r14,__ret_from_fork | |
b94c0ebb | 301 | STACKLEAK_ERASE |
56e62a73 SS |
302 | lctlg %c1,%c1,__LC_USER_ASCE |
303 | mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) | |
f33f2d4c | 304 | BPON |
3b051e89 | 305 | LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) |
56e62a73 SS |
306 | lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) |
307 | stpt __LC_EXIT_TIMER | |
3b051e89 | 308 | LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE |
fda1dffa | 309 | SYM_CODE_END(ret_from_fork) |
26a374ae | 310 | |
1da177e4 LT |
311 | /* |
312 | * Program check handler routine | |
313 | */ | |
314 | ||
fda1dffa | 315 | SYM_CODE_START(pgm_check_handler) |
56e62a73 | 316 | stpt __LC_SYS_ENTER_TIMER |
d768bd89 | 317 | BPOFF |
c5328901 | 318 | stmg %r8,%r15,__LC_SAVE_AREA_SYNC |
64c34318 | 319 | lgr %r10,%r15 |
c5328901 | 320 | lmg %r8,%r9,__LC_PGM_OLD_PSW |
87d59863 HC | 321 | tmhh %r8,0x0001 # coming from user space? |
322 | jno .Lpgm_skip_asce | |
323 | lctlg %c1,%c1,__LC_KERNEL_ASCE | |
56e62a73 | 324 | j 3f # -> fault in user space |
87d59863 | 325 | .Lpgm_skip_asce: |
0b38b5e1 SS |
326 | 1: tmhh %r8,0x4000 # PER bit set in old PSW ? |
327 | jnz 2f # -> enabled, can't be a double fault | |
c5328901 | 328 | tm __LC_PGM_ILC+3,0x80 # check for per exception |
86ed42f4 | 329 | jnz .Lpgm_svcper # -> single stepped svc |
0b38b5e1 | 330 | 2: CHECK_STACK __LC_SAVE_AREA_SYNC |
dc7ee00d | 331 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
56e62a73 SS |
332 | # CHECK_VMAP_STACK branches to stack_overflow or 4f |
333 | CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f | |
f33f2d4c | 334 | 3: lg %r15,__LC_KERNEL_STACK |
56e62a73 | 335 | 4: la %r11,STACK_FRAME_OVERHEAD(%r15) |
64c34318 | 336 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
56e62a73 | 337 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
c5328901 | 338 | stmg %r0,%r7,__PT_R0(%r11) |
56e62a73 | 339 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC |
3b051e89 | 340 | mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK |
64c34318 SS |
341 | stctg %c1,%c1,__PT_CR1(%r11) |
342 | #if IS_ENABLED(CONFIG_KVM) | |
d111855a SS |
343 | ltg %r12,__LC_GMAP |
344 | jz 5f | |
64c34318 SS |
345 | clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11) |
346 | jne 5f | |
347 | BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST | |
348 | SIEEXIT __SF_SIE_CONTROL(%r10) | |
349 | #endif | |
350 | 5: stmg %r8,%r9,__PT_PSW(%r11) | |
7041d281 MS |
351 | # clear user controlled registers to prevent speculative use |
352 | xgr %r0,%r0 | |
353 | xgr %r1,%r1 | |
7041d281 MS |
354 | xgr %r3,%r3 |
355 | xgr %r4,%r4 | |
356 | xgr %r5,%r5 | |
357 | xgr %r6,%r6 | |
358 | xgr %r7,%r7 | |
56e62a73 SS |
359 | lgr %r2,%r11 |
360 | brasl %r14,__do_pgm_check | |
361 | tmhh %r8,0x0001 # returning to user space? | |
362 | jno .Lpgm_exit_kernel | |
b94c0ebb | 363 | STACKLEAK_ERASE |
56e62a73 | 364 | lctlg %c1,%c1,__LC_USER_ASCE |
f33f2d4c | 365 | BPON |
0cd9b723 | 366 | stpt __LC_EXIT_TIMER |
56e62a73 SS |
367 | .Lpgm_exit_kernel: |
368 | mvc __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) | |
3b051e89 | 369 | LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) |
56e62a73 | 370 | lmg %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15) |
3b051e89 | 371 | LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE |
1da177e4 | 372 | |
4ba069b8 | 373 | # |
c5328901 | 374 | # single stepped system call |
4ba069b8 | 375 | # |
86ed42f4 | 376 | .Lpgm_svcper: |
c5328901 | 377 | mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW |
86ed42f4 | 378 | larl %r14,.Lsysc_per |
c5328901 | 379 | stg %r14,__LC_RETURN_PSW+8 |
56e62a73 | 380 | lghi %r14,1 |
3b051e89 SS | 381 | LBEAR __LC_PGM_LAST_BREAK |
382 | LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE # branch to .Lsysc_per | |
fda1dffa | 383 | SYM_CODE_END(pgm_check_handler) |
4ba069b8 | 384 | |
1da177e4 | 385 | /* |
56e62a73 | 386 | * Interrupt handler macro used for external and IO interrupts. |
1da177e4 | 387 | */ |
56e62a73 | 388 | .macro INT_HANDLER name,lc_old_psw,handler |
fda1dffa | 389 | SYM_CODE_START(\name) |
10bc15ba | 390 | stckf __LC_INT_CLOCK |
56e62a73 | 391 | stpt __LC_SYS_ENTER_TIMER |
3b051e89 | 392 | STBEAR __LC_LAST_BREAK |
d768bd89 | 393 | BPOFF |
c5328901 | 394 | stmg %r8,%r15,__LC_SAVE_AREA_ASYNC |
56e62a73 | 395 | lmg %r8,%r9,\lc_old_psw |
b0d31159 SS |
396 | tmhh %r8,0x0001 # interrupting from user ? |
397 | jnz 1f | |
398 | #if IS_ENABLED(CONFIG_KVM) | |
c239c83e SS |
399 | TSTMSK __LC_CPU_FLAGS,_CIF_SIE |
400 | jz 0f | |
f33f2d4c | 401 | BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST |
64c34318 | 402 | SIEEXIT __SF_SIE_CONTROL(%r15) |
b0d31159 SS |
403 | #endif |
404 | 0: CHECK_STACK __LC_SAVE_AREA_ASYNC | |
b0d31159 | 405 | aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) |
b0d31159 | 406 | j 2f |
f33f2d4c | 407 | 1: lctlg %c1,%c1,__LC_KERNEL_ASCE |
b0d31159 | 408 | lg %r15,__LC_KERNEL_STACK |
b74e409e VG |
409 | 2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
410 | la %r11,STACK_FRAME_OVERHEAD(%r15) | |
c5328901 | 411 | stmg %r0,%r7,__PT_R0(%r11) |
7041d281 MS |
412 | # clear user controlled registers to prevent speculative use |
413 | xgr %r0,%r0 | |
414 | xgr %r1,%r1 | |
7041d281 MS |
415 | xgr %r3,%r3 |
416 | xgr %r4,%r4 | |
417 | xgr %r5,%r5 | |
418 | xgr %r6,%r6 | |
419 | xgr %r7,%r7 | |
420 | xgr %r10,%r10 | |
ca1f4d70 | 421 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
c5328901 | 422 | mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC |
3b051e89 | 423 | MBEAR %r11 |
c5328901 | 424 | stmg %r8,%r9,__PT_PSW(%r11) |
29b06ad7 | 425 | lgr %r2,%r11 # pass pointer to pt_regs |
56e62a73 | 426 | brasl %r14,\handler |
c5328901 | 427 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
56e62a73 SS |
428 | tmhh %r8,0x0001 # returning to user ? |
429 | jno 2f | |
b94c0ebb | 430 | STACKLEAK_ERASE |
87d59863 | 431 | lctlg %c1,%c1,__LC_USER_ASCE |
f33f2d4c | 432 | BPON |
c5328901 | 433 | stpt __LC_EXIT_TIMER |
3b051e89 SS |
434 | 2: LBEAR __PT_LAST_BREAK(%r11) |
435 | lmg %r0,%r15,__PT_R0(%r11) | |
436 | LPSWEY __LC_RETURN_PSW,__LC_RETURN_LPSWE | |
fda1dffa | 437 | SYM_CODE_END(\name) |
56e62a73 | 438 | .endm |
916cda1a | 439 | |
56e62a73 SS |
440 | INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq |
441 | INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq | |
1da177e4 | 442 | |
4c1051e3 | 443 | /* |
0b0ed657 | 444 | * Load idle PSW. |
4c1051e3 | 445 | */ |
fda1dffa | 446 | SYM_FUNC_START(psw_idle) |
a994eddb | 447 | stg %r14,(__SF_GPRS+8*8)(%r15) |
27f6b416 | 448 | stg %r3,__SF_EMPTY(%r15) |
56e62a73 | 449 | larl %r1,psw_idle_exit |
4c1051e3 | 450 | stg %r1,__SF_EMPTY+8(%r15) |
72d38b19 MS |
451 | larl %r1,smp_cpu_mtid |
452 | llgf %r1,0(%r1) | |
453 | ltgr %r1,%r1 | |
454 | jz .Lpsw_idle_stcctm | |
56e62a73 | 455 | .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2) |
72d38b19 | 456 | .Lpsw_idle_stcctm: |
419123f9 | 457 | oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT |
d768bd89 | 458 | BPON |
10bc15ba | 459 | stckf __CLOCK_IDLE_ENTER(%r2) |
27f6b416 | 460 | stpt __TIMER_IDLE_ENTER(%r2) |
4c1051e3 | 461 | lpswe __SF_EMPTY(%r15) |
fda1dffa | 462 | SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL) |
6dd85fbb | 463 | BR_EX %r14 |
fda1dffa | 464 | SYM_FUNC_END(psw_idle) |
4c1051e3 | 465 | |
1da177e4 LT |
466 | /* |
467 | * Machine check handler routines | |
468 | */ | |
fda1dffa | 469 | SYM_CODE_START(mcck_int_handler) |
d768bd89 | 470 | BPOFF |
c5328901 | 471 | lmg %r8,%r9,__LC_MCK_OLD_PSW |
83abeffb | 472 | TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE |
86ed42f4 | 473 | jo .Lmcck_panic # yes -> rest of mcck code invalid |
3037a52f MS |
474 | TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID |
475 | jno .Lmcck_panic # control registers invalid -> panic | |
3037a52f | 476 | ptlb |
5fa2ea07 | 477 | lghi %r14,__LC_CPU_TIMER_SAVE_AREA |
c5328901 | 478 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
83abeffb | 479 | TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID |
c5328901 | 480 | jo 3f |
56e62a73 SS |
481 | la %r14,__LC_SYS_ENTER_TIMER |
482 | clc 0(8,%r14),__LC_EXIT_TIMER | |
c5328901 | 483 | jl 1f |
63b12246 | 484 | la %r14,__LC_EXIT_TIMER |
c5328901 MS |
485 | 1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER |
486 | jl 2f | |
63b12246 | 487 | la %r14,__LC_LAST_UPDATE_TIMER |
c5328901 | 488 | 2: spt 0(%r14) |
6377981f | 489 | mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) |
3037a52f MS |
490 | 3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID |
491 | jno .Lmcck_panic | |
492 | tmhh %r8,0x0001 # interrupting from user ? | |
742aed05 | 493 | jnz .Lmcck_user |
3037a52f MS |
494 | TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID |
495 | jno .Lmcck_panic | |
b0d31159 | 496 | #if IS_ENABLED(CONFIG_KVM) |
c239c83e SS |
497 | TSTMSK __LC_CPU_FLAGS,_CIF_SIE |
498 | jz .Lmcck_user | |
499 | # Need to compare the address instead of a CIF_SIE* flag. | |
500 | # Otherwise there would be a race between setting the flag | |
501 | # and entering SIE (or leaving and clearing the flag). This | |
502 | # would cause machine checks targeted at the guest to be | |
503 | # handled by the host. | |
29e5bc0f SS |
504 | larl %r14,.Lsie_entry |
505 | clgrjl %r9,%r14, 4f | |
506 | larl %r14,.Lsie_leave | |
507 | clgrjhe %r9,%r14, 4f | |
20232b18 | 508 | oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST |
f33f2d4c | 509 | 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST |
64c34318 | 510 | SIEEXIT __SF_SIE_CONTROL(%r15) |
e2c13d64 | 511 | #endif |
742aed05 | 512 | .Lmcck_user: |
b61b1595 | 513 | lg %r15,__LC_MCCK_STACK |
b61b1595 | 514 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
26521412 | 515 | stctg %c1,%c1,__PT_CR1(%r11) |
b0d31159 | 516 | lctlg %c1,%c1,__LC_KERNEL_ASCE |
b0d31159 | 517 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
6551fbdf MS |
518 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 |
519 | stmg %r0,%r7,__PT_R0(%r11) | |
7041d281 MS |
520 | # clear user controlled registers to prevent speculative use |
521 | xgr %r0,%r0 | |
522 | xgr %r1,%r1 | |
7041d281 MS |
523 | xgr %r3,%r3 |
524 | xgr %r4,%r4 | |
525 | xgr %r5,%r5 | |
526 | xgr %r6,%r6 | |
527 | xgr %r7,%r7 | |
528 | xgr %r10,%r10 | |
6551fbdf | 529 | mvc __PT_R8(64,%r11),0(%r14) |
c5328901 | 530 | stmg %r8,%r9,__PT_PSW(%r11) |
d3a73acb | 531 | xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) |
c5328901 MS |
532 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
533 | lgr %r2,%r11 # pass pointer to pt_regs | |
77fa2245 | 534 | brasl %r14,s390_do_machine_check |
87d59863 | 535 | lctlg %c1,%c1,__PT_CR1(%r11) |
c5328901 MS |
536 | lmg %r0,%r10,__PT_R0(%r11) |
537 | mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW | |
63b12246 MS |
538 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
539 | jno 0f | |
f33f2d4c | 540 | BPON |
63b12246 | 541 | stpt __LC_EXIT_TIMER |
fad442d3 | 542 | 0: ALTERNATIVE "nop", __stringify(lghi %r12,__LC_LAST_BREAK_SAVE_AREA),193 |
3b051e89 SS |
543 | LBEAR 0(%r12) |
544 | lmg %r11,%r15,__PT_R11(%r11) | |
545 | LPSWEY __LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE | |
c5328901 | 546 | |
86ed42f4 | 547 | .Lmcck_panic: |
7f6dc8d4 AG |
548 | /* |
549 | * Iterate over all possible CPU addresses in the range 0..0xffff | |
550 | * and stop each CPU using signal processor. Use compare and swap | |
551 | * to allow just one CPU-stopper and prevent concurrent CPUs from | |
552 | * stopping each other while leaving the others running. | |
553 | */ | |
554 | lhi %r5,0 | |
555 | lhi %r6,1 | |
fda1dffa | 556 | larl %r7,stop_lock |
7f6dc8d4 AG |
557 | cs %r5,%r6,0(%r7) # single CPU-stopper only |
558 | jnz 4f | |
fda1dffa | 559 | larl %r7,this_cpu |
7f6dc8d4 AG |
560 | stap 0(%r7) # this CPU address |
561 | lh %r4,0(%r7) | |
562 | nilh %r4,0 | |
563 | lhi %r0,1 | |
564 | sll %r0,16 # CPU counter | |
565 | lhi %r3,0 # next CPU address | |
566 | 0: cr %r3,%r4 | |
567 | je 2f | |
568 | 1: sigp %r1,%r3,SIGP_STOP # stop next CPU | |
569 | brc SIGP_CC_BUSY,1b | |
570 | 2: ahi %r3,1 | |
571 | brct %r0,0b | |
572 | 3: sigp %r1,%r4,SIGP_STOP # stop this CPU | |
573 | brc SIGP_CC_BUSY,3b | |
574 | 4: j 4b | |
fda1dffa | 575 | SYM_CODE_END(mcck_int_handler) |
1da177e4 | 576 | |
fda1dffa | 577 | SYM_CODE_START(restart_int_handler) |
fad442d3 | 578 | ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 |
e5b98199 | 579 | stg %r15,__LC_SAVE_AREA_RESTART |
915fea04 AG |
580 | TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4 |
581 | jz 0f | |
385bf43c | 582 | lctlg %c0,%c15,__LC_CREGS_SAVE_AREA |
edbe2898 AG |
583 | 0: larl %r15,daton_psw |
584 | lpswe 0(%r15) # turn dat on, keep irqs off | |
585 | .Ldaton: | |
8b646bd7 | 586 | lg %r15,__LC_RESTART_STACK |
ce3dc447 MS |
587 | xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15) |
588 | stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15) | |
589 | mvc STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART | |
590 | mvc STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW | |
8b646bd7 | 591 | xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15) |
fbe76568 HC |
592 | lg %r1,__LC_RESTART_FN # load fn, parm & source cpu |
593 | lg %r2,__LC_RESTART_DATA | |
915fea04 | 594 | lgf %r3,__LC_RESTART_SOURCE |
8b646bd7 MS |
595 | ltgr %r3,%r3 # test source cpu address |
596 | jm 1f # negative -> skip source stop | |
eb546195 | 597 | 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu |
8b646bd7 MS |
598 | brc 10,0b # wait for status stored |
599 | 1: basr %r14,%r1 # call function | |
600 | stap __SF_EMPTY(%r15) # store cpu address | |
601 | llgh %r3,__SF_EMPTY(%r15) | |
eb546195 | 602 | 2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu |
8b646bd7 MS |
603 | brc 2,2b |
604 | 3: j 3b | |
fda1dffa | 605 | SYM_CODE_END(restart_int_handler) |
7dd6b334 | 606 | |
860dba45 MS |
607 | .section .kprobes.text, "ax" |
608 | ||
ce3dc447 | 609 | #if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK) |
1da177e4 LT |
610 | /* |
611 | * The synchronous or the asynchronous stack overflowed. We are dead. | |
612 | * No need to properly save the registers, we are going to panic anyway. | |
613 | * Setup a pt_regs so that show_trace can provide a good call trace. | |
614 | */ | |
fda1dffa | 615 | SYM_CODE_START(stack_overflow) |
ce3dc447 | 616 | lg %r15,__LC_NODAT_STACK # change to panic stack |
dc7ee00d | 617 | la %r11,STACK_FRAME_OVERHEAD(%r15) |
c5328901 MS |
618 | stmg %r0,%r7,__PT_R0(%r11) |
619 | stmg %r8,%r9,__PT_PSW(%r11) | |
620 | mvc __PT_R8(64,%r11),0(%r14) | |
621 | stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 | |
c5328901 MS |
622 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
623 | lgr %r2,%r11 # pass pointer to pt_regs | |
1da177e4 | 624 | jg kernel_stack_overflow |
fda1dffa | 625 | SYM_CODE_END(stack_overflow) |
1da177e4 LT |
626 | #endif |
627 | ||
7f6dc8d4 | 628 | .section .data, "aw" |
fda1dffa HC |
629 | .balign 4 |
630 | SYM_DATA_LOCAL(stop_lock, .long 0) | |
631 | SYM_DATA_LOCAL(this_cpu, .short 0) | |
edbe2898 AG |
632 | .balign 8 |
633 | SYM_DATA_START_LOCAL(daton_psw) | |
634 | .quad PSW_KERNEL_BITS | |
635 | .quad .Ldaton | |
636 | SYM_DATA_END(daton_psw) | |
fda1dffa | 637 | |
a876cb3f | 638 | .section .rodata, "a" |
378ca2d2 | 639 | .balign 8 |
ff4a742d | 640 | #define SYSCALL(esame,emu) .quad __s390x_ ## esame |
fda1dffa | 641 | SYM_DATA_START(sys_call_table) |
4381f9f1 | 642 | #include "asm/syscall_table.h" |
fda1dffa | 643 | SYM_DATA_END(sys_call_table) |
1da177e4 LT |
644 | #undef SYSCALL |
645 | ||
347a8dc3 | 646 | #ifdef CONFIG_COMPAT |
1da177e4 | 647 | |
ff4a742d | 648 | #define SYSCALL(esame,emu) .quad __s390_ ## emu |
fda1dffa | 649 | SYM_DATA_START(sys_call_table_emu) |
4381f9f1 | 650 | #include "asm/syscall_table.h" |
fda1dffa | 651 | SYM_DATA_END(sys_call_table_emu) |
1da177e4 LT |
652 | #undef SYSCALL |
653 | #endif |