// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common signal handling code for both 32 and 64 bits
 *
 * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
 * Extracted from signal_32.c and signal_64.c
 */
8 | ||
03248add | 9 | #include <linux/resume_user_mode.h> |
22e38f29 | 10 | #include <linux/signal.h> |
8b7b80b9 | 11 | #include <linux/uprobes.h> |
18b246fa | 12 | #include <linux/key.h> |
106ed886 | 13 | #include <linux/context_tracking.h> |
a768f784 | 14 | #include <linux/livepatch.h> |
3e378680 | 15 | #include <linux/syscalls.h> |
06532a67 | 16 | #include <asm/hw_breakpoint.h> |
7c0f6ba6 | 17 | #include <linux/uaccess.h> |
3dd4eb83 | 18 | #include <asm/switch_to.h> |
22e38f29 | 19 | #include <asm/unistd.h> |
ae3a197e | 20 | #include <asm/debug.h> |
2b3f8e87 | 21 | #include <asm/tm.h> |
22e38f29 | 22 | |
db277e9a CH |
23 | #include "signal.h" |
24 | ||
3dd4eb83 MS |
25 | #ifdef CONFIG_VSX |
26 | unsigned long copy_fpr_to_user(void __user *to, | |
27 | struct task_struct *task) | |
28 | { | |
29 | u64 buf[ELF_NFPREG]; | |
30 | int i; | |
31 | ||
32 | /* save FPR copy to local buffer then write to the thread_struct */ | |
33 | for (i = 0; i < (ELF_NFPREG - 1) ; i++) | |
34 | buf[i] = task->thread.TS_FPR(i); | |
35 | buf[i] = task->thread.fp_state.fpscr; | |
36 | return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); | |
37 | } | |
38 | ||
39 | unsigned long copy_fpr_from_user(struct task_struct *task, | |
40 | void __user *from) | |
41 | { | |
42 | u64 buf[ELF_NFPREG]; | |
43 | int i; | |
44 | ||
45 | if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) | |
46 | return 1; | |
47 | for (i = 0; i < (ELF_NFPREG - 1) ; i++) | |
48 | task->thread.TS_FPR(i) = buf[i]; | |
49 | task->thread.fp_state.fpscr = buf[i]; | |
50 | ||
51 | return 0; | |
52 | } | |
53 | ||
54 | unsigned long copy_vsx_to_user(void __user *to, | |
55 | struct task_struct *task) | |
56 | { | |
57 | u64 buf[ELF_NVSRHALFREG]; | |
58 | int i; | |
59 | ||
60 | /* save FPR copy to local buffer then write to the thread_struct */ | |
61 | for (i = 0; i < ELF_NVSRHALFREG; i++) | |
62 | buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; | |
63 | return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); | |
64 | } | |
65 | ||
66 | unsigned long copy_vsx_from_user(struct task_struct *task, | |
67 | void __user *from) | |
68 | { | |
69 | u64 buf[ELF_NVSRHALFREG]; | |
70 | int i; | |
71 | ||
72 | if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) | |
73 | return 1; | |
74 | for (i = 0; i < ELF_NVSRHALFREG ; i++) | |
75 | task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; | |
76 | return 0; | |
77 | } | |
78 | ||
79 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | |
80 | unsigned long copy_ckfpr_to_user(void __user *to, | |
81 | struct task_struct *task) | |
82 | { | |
83 | u64 buf[ELF_NFPREG]; | |
84 | int i; | |
85 | ||
86 | /* save FPR copy to local buffer then write to the thread_struct */ | |
87 | for (i = 0; i < (ELF_NFPREG - 1) ; i++) | |
88 | buf[i] = task->thread.TS_CKFPR(i); | |
89 | buf[i] = task->thread.ckfp_state.fpscr; | |
90 | return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double)); | |
91 | } | |
92 | ||
93 | unsigned long copy_ckfpr_from_user(struct task_struct *task, | |
94 | void __user *from) | |
95 | { | |
96 | u64 buf[ELF_NFPREG]; | |
97 | int i; | |
98 | ||
99 | if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double))) | |
100 | return 1; | |
101 | for (i = 0; i < (ELF_NFPREG - 1) ; i++) | |
102 | task->thread.TS_CKFPR(i) = buf[i]; | |
103 | task->thread.ckfp_state.fpscr = buf[i]; | |
104 | ||
105 | return 0; | |
106 | } | |
107 | ||
108 | unsigned long copy_ckvsx_to_user(void __user *to, | |
109 | struct task_struct *task) | |
110 | { | |
111 | u64 buf[ELF_NVSRHALFREG]; | |
112 | int i; | |
113 | ||
114 | /* save FPR copy to local buffer then write to the thread_struct */ | |
115 | for (i = 0; i < ELF_NVSRHALFREG; i++) | |
116 | buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET]; | |
117 | return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double)); | |
118 | } | |
119 | ||
120 | unsigned long copy_ckvsx_from_user(struct task_struct *task, | |
121 | void __user *from) | |
122 | { | |
123 | u64 buf[ELF_NVSRHALFREG]; | |
124 | int i; | |
125 | ||
126 | if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double))) | |
127 | return 1; | |
128 | for (i = 0; i < ELF_NVSRHALFREG ; i++) | |
129 | task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; | |
130 | return 0; | |
131 | } | |
132 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | |
3dd4eb83 MS |
133 | #endif |
134 | ||
d0c3d534 OJ |
/*
 * Log an error when sending an unhandled signal to a process.  Controlled
 * through the debug.exception-trace sysctl (non-zero enables logging;
 * consumed by signal_fault() below).
 */
int show_unhandled_signals = 1;
d0c3d534 | 140 | |
2896b2df NP |
141 | unsigned long get_min_sigframe_size(void) |
142 | { | |
143 | if (IS_ENABLED(CONFIG_PPC64)) | |
144 | return get_min_sigframe_size_64(); | |
145 | else | |
146 | return get_min_sigframe_size_32(); | |
147 | } | |
148 | ||
149 | #ifdef CONFIG_COMPAT | |
/*
 * Compat (32-bit) tasks always use the 32-bit signal frame layout, so
 * the compat minimum frame size is simply the 32-bit one.
 */
unsigned long get_min_sigframe_size_compat(void)
{
	return get_min_sigframe_size_32();
}
154 | #endif | |
155 | ||
a3f61dc0 BH |
156 | /* |
157 | * Allocate space for the signal frame | |
158 | */ | |
c180cb30 CL |
159 | static unsigned long get_tm_stackpointer(struct task_struct *tsk); |
160 | ||
161 | void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk, | |
162 | size_t frame_size, int is_32) | |
a3f61dc0 BH |
163 | { |
164 | unsigned long oldsp, newsp; | |
c180cb30 | 165 | unsigned long sp = get_tm_stackpointer(tsk); |
a3f61dc0 BH |
166 | |
167 | /* Default to using normal stack */ | |
0ecbc6ad CL |
168 | if (is_32) |
169 | oldsp = sp & 0x0ffffffffUL; | |
170 | else | |
171 | oldsp = sp; | |
059ade65 | 172 | oldsp = sigsp(oldsp, ksig); |
a3f61dc0 BH |
173 | newsp = (oldsp - frame_size) & ~0xFUL; |
174 | ||
a3f61dc0 BH |
175 | return (void __user *)newsp; |
176 | } | |
177 | ||
f478f543 CH |
/*
 * Decide whether an interrupted syscall should be restarted when a
 * signal is taken, and patch the register state accordingly.
 *
 * If the trap was a syscall that returned one of the ERESTART* codes,
 * either rewind the NIP by one instruction so the sc/scv is re-executed
 * (possibly substituting restart_syscall), or convert the return value
 * to -EINTR for delivery to the handler.  @has_handler tells us whether
 * a signal handler will actually run.
 */
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (!trap_is_syscall(regs))
		return;

	/* Restart already suppressed (e.g. by a previous signal) ? */
	if (trap_norestart(regs))
		return;

	/* error signalled ? */
	if (trap_is_scv(regs)) {
		/* 32-bit compat mode sign extend? */
		/* scv returns negative errno directly; not an error => done */
		if (!IS_ERR_VALUE(ret))
			return;
		ret = -ret;
	} else if (!(regs->ccr & 0x10000000)) {
		/* sc signals errors via CR0.SO; clear means success */
		return;
	}

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we dont restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		return;
	}
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		/* Back up NIP by one instruction to re-execute the syscall */
		regs_add_return_ip(regs, -4);
		regs->result = 0;
	} else {
		if (trap_is_scv(regs)) {
			/* scv convention: negative errno in r3 */
			regs->result = -EINTR;
			regs->gpr[3] = -EINTR;
		} else {
			/* sc convention: positive errno in r3, CR0.SO set */
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			regs->ccr |= 0x10000000;
		}
	}
}
69d15f6b | 242 | |
d1199431 | 243 | static void do_signal(struct task_struct *tsk) |
f478f543 | 244 | { |
b7f9a11a | 245 | sigset_t *oldset = sigmask_to_save(); |
46725b17 | 246 | struct ksignal ksig = { .sig = 0 }; |
f478f543 | 247 | int ret; |
f478f543 | 248 | |
d1199431 CB |
249 | BUG_ON(tsk != current); |
250 | ||
129b69df | 251 | get_signal(&ksig); |
f478f543 | 252 | |
f478f543 | 253 | /* Is there any syscall restart business here ? */ |
d1199431 | 254 | check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0); |
f478f543 | 255 | |
129b69df | 256 | if (ksig.sig <= 0) { |
f478f543 | 257 | /* No signal to deliver -- put the saved sigmask back */ |
51a7b448 | 258 | restore_saved_sigmask(); |
4e0e45b0 | 259 | set_trap_norestart(tsk->thread.regs); |
129b69df | 260 | return; /* no signals delivered */ |
f478f543 CH |
261 | } |
262 | ||
f478f543 CH |
263 | /* |
264 | * Reenable the DABR before delivering the signal to | |
265 | * user space. The DABR will have been cleared if it | |
266 | * triggered inside the kernel. | |
267 | */ | |
303e6a9d RB |
268 | if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) { |
269 | int i; | |
270 | ||
271 | for (i = 0; i < nr_wp_slots(); i++) { | |
272 | if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type) | |
273 | __set_breakpoint(i, &tsk->thread.hw_brk[i]); | |
274 | } | |
275 | } | |
276 | ||
06532a67 | 277 | /* Re-enable the breakpoints for the signal stack */ |
d1199431 | 278 | thread_change_pc(tsk, tsk->thread.regs); |
f478f543 | 279 | |
784e0300 | 280 | rseq_signal_deliver(&ksig, tsk->thread.regs); |
8a417c48 | 281 | |
0a7601b6 | 282 | if (is_32bit_task()) { |
129b69df | 283 | if (ksig.ka.sa.sa_flags & SA_SIGINFO) |
d1199431 | 284 | ret = handle_rt_signal32(&ksig, oldset, tsk); |
f478f543 | 285 | else |
d1199431 | 286 | ret = handle_signal32(&ksig, oldset, tsk); |
f478f543 | 287 | } else { |
d1199431 | 288 | ret = handle_rt_signal64(&ksig, oldset, tsk); |
f478f543 CH |
289 | } |
290 | ||
4e0e45b0 | 291 | set_trap_norestart(tsk->thread.regs); |
129b69df | 292 | signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP)); |
f478f543 CH |
293 | } |
294 | ||
18b246fa | 295 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
7d6d637d | 296 | { |
f57d56dd | 297 | if (thread_info_flags & _TIF_UPROBE) |
8b7b80b9 | 298 | uprobe_notify_resume(regs); |
8b7b80b9 | 299 | |
43347d56 MB |
300 | if (thread_info_flags & _TIF_PATCH_PENDING) |
301 | klp_update_patch_state(current); | |
302 | ||
900f0713 | 303 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { |
d1199431 CB |
304 | BUG_ON(regs != current->thread.regs); |
305 | do_signal(current); | |
306 | } | |
7d6d637d | 307 | |
a68de80f | 308 | if (thread_info_flags & _TIF_NOTIFY_RESUME) |
03248add | 309 | resume_user_mode_work(regs); |
7d6d637d | 310 | } |
2b3f8e87 | 311 | |
c180cb30 | 312 | static unsigned long get_tm_stackpointer(struct task_struct *tsk) |
2b3f8e87 MN |
313 | { |
314 | /* When in an active transaction that takes a signal, we need to be | |
315 | * careful with the stack. It's possible that the stack has moved back | |
316 | * up after the tbegin. The obvious case here is when the tbegin is | |
317 | * called inside a function that returns before a tend. In this case, | |
318 | * the stack is part of the checkpointed transactional memory state. | |
319 | * If we write over this non transactionally or in suspend, we are in | |
320 | * trouble because if we get a tm abort, the program counter and stack | |
321 | * pointer will be back at the tbegin but our in memory stack won't be | |
322 | * valid anymore. | |
323 | * | |
324 | * To avoid this, when taking a signal in an active transaction, we | |
325 | * need to use the stack pointer from the checkpointed state, rather | |
326 | * than the speculated state. This ensures that the signal context | |
327 | * (written tm suspended) will be written below the stack required for | |
446957ba | 328 | * the rollback. The transaction is aborted because of the treclaim, |
2b3f8e87 MN |
329 | * so any memory written between the tbegin and the signal will be |
330 | * rolled back anyway. | |
331 | * | |
332 | * For signals taken in non-TM or suspended mode, we use the | |
333 | * normal/non-checkpointed stack pointer. | |
334 | */ | |
59dc5bfc NP |
335 | struct pt_regs *regs = tsk->thread.regs; |
336 | unsigned long ret = regs->gpr[1]; | |
2464cc4c | 337 | |
2b3f8e87 | 338 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
d1199431 CB |
339 | BUG_ON(tsk != current); |
340 | ||
59dc5bfc | 341 | if (MSR_TM_ACTIVE(regs->msr)) { |
2464cc4c | 342 | preempt_disable(); |
d31626f7 | 343 | tm_reclaim_current(TM_CAUSE_SIGNAL); |
59dc5bfc | 344 | if (MSR_TM_TRANSACTIONAL(regs->msr)) |
2464cc4c GLD |
345 | ret = tsk->thread.ckpt_regs.gpr[1]; |
346 | ||
347 | /* | |
348 | * If we treclaim, we must clear the current thread's TM bits | |
349 | * before re-enabling preemption. Otherwise we might be | |
350 | * preempted and have the live MSR[TS] changed behind our back | |
351 | * (tm_recheckpoint_new_task() would recheckpoint). Besides, we | |
352 | * enter the signal handler in non-transactional state. | |
353 | */ | |
59dc5bfc | 354 | regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK); |
2464cc4c | 355 | preempt_enable(); |
2b3f8e87 MN |
356 | } |
357 | #endif | |
2464cc4c | 358 | return ret; |
2b3f8e87 | 359 | } |
7fe8f773 CL |
360 | |
361 | static const char fm32[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %08lx lr %08lx\n"; | |
362 | static const char fm64[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %016lx lr %016lx\n"; | |
363 | ||
364 | void signal_fault(struct task_struct *tsk, struct pt_regs *regs, | |
365 | const char *where, void __user *ptr) | |
366 | { | |
367 | if (show_unhandled_signals) | |
368 | printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm, | |
369 | task_pid_nr(tsk), where, ptr, regs->nip, regs->link); | |
370 | } |