// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
	char *corename;
	int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

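/*
 * expand_corename - grow cn->corename to at least @size bytes.
 * krealloc() may round the allocation up, so re-read the usable size
 * with ksize(); on failure the old buffer is left in place and -ENOMEM
 * is returned.
 */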
static int expand_corename(struct core_name *cn, int size)
{
	char *corename = krealloc(cn->corename, size, GFP_KERNEL);

	if (!corename)
		return -ENOMEM;

	if (size > core_name_size) /* racy but harmless */
		core_name_size = size;

	cn->size = ksize(corename);
	cn->corename = corename;
	return 0;
}

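/*
 * cn_vprintf - append a vsnprintf()-formatted string to cn->corename,
 * growing the buffer and retrying if the output does not fit. The
 * va_list is copied before each attempt because vsnprintf() consumes it.
 */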
static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
				     va_list arg)
{
	int free, need;
	va_list arg_copy;

again:
	free = cn->size - cn->used;

	va_copy(arg_copy, arg);
	need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
	va_end(arg_copy);

	if (need < free) {
		cn->used += need;
		return 0;
	}

	if (!expand_corename(cn, cn->size + need - free + 1))
		goto again;

	return -ENOMEM;
}

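/* cn_printf - printf-style front end to cn_vprintf() */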
static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	return ret;
}

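/*
 * cn_esc_printf - like cn_printf(), but sanitize what was appended so a
 * single pattern component cannot change the corefile's directory:
 * "." and ".." are defused, empty expansions are padded with '!', and
 * every '/' becomes '!'.
 */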
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
	int cur = cn->used;
	va_list arg;
	int ret;

	va_start(arg, fmt);
	ret = cn_vprintf(cn, fmt, arg);
	va_end(arg);

	if (ret == 0) {
		/*
		 * Ensure that this coredump name component can't cause the
		 * resulting corefile path to consist of a ".." or ".".
		 */
		if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
				(cn->used - cur == 2 && cn->corename[cur] == '.'
				&& cn->corename[cur+1] == '.'))
			cn->corename[cur] = '!';

		/*
		 * Empty names are fishy and could be used to create a "//" in a
		 * corefile name, causing the coredump to happen one directory
		 * level too high. Enforce that all components of the core
		 * pattern are at least one character long.
		 */
		if (cn->used == cur)
			ret = cn_printf(cn, "!");
	}

	for (; cur < cn->used; ++cur) {
		if (cn->corename[cur] == '/')
			cn->corename[cur] = '!';
	}
	return ret;
}

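/*
 * cn_print_exe_file - append the path of the executable backing
 * current->mm (only its last component when @name_only), escaped via
 * cn_esc_printf().
 */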
static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
	struct file *exe_file;
	char *pathbuf, *path, *ptr;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file)
		return cn_esc_printf(cn, "%s (path unknown)", current->comm);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = file_path(exe_file, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	if (name_only) {
		ptr = strrchr(path, '/');
		if (ptr)
			path = ptr + 1;
	}
	ret = cn_esc_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
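/*
 * Illustrative example (not part of the original source): with
 * core_pattern = "core.%e.%p", a crash of pid 1234 running "bash"
 * expands to "core.bash.1234". With a leading '|', as in
 * "|/path/to/helper %p", the same expansion instead builds the argv
 * for the usermode helper, split on spaces.
 */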
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
			   size_t **argv, int *argc)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	bool was_space = false;
	int pid_in_pattern = 0;
	int err = 0;

	cn->used = 0;
	cn->corename = NULL;
	if (expand_corename(cn, core_name_size))
		return -ENOMEM;
	cn->corename[0] = '\0';

	if (ispipe) {
		int argvs = sizeof(core_pattern) / 2;
		(*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
		if (!(*argv))
			return -ENOMEM;
		(*argv)[(*argc)++] = 0;
		++pat_ptr;
		if (!(*pat_ptr))
			return -ENOMEM;
	}

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		/*
		 * Split on spaces before doing template expansion so that
		 * %e and %E don't get split if they have spaces in them
		 */
		if (ispipe) {
			if (isspace(*pat_ptr)) {
				if (cn->used != 0)
					was_space = true;
				pat_ptr++;
				continue;
			} else if (was_space) {
				was_space = false;
				err = cn_printf(cn, "%c", '\0');
				if (err)
					return err;
				(*argv)[(*argc)++] = cn->used;
			}
		}
		if (*pat_ptr != '%') {
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* global pid */
			case 'P':
				err = cn_printf(cn, "%d",
					      task_tgid_nr(current));
				break;
			case 'i':
				err = cn_printf(cn, "%d",
					      task_pid_vnr(current));
				break;
			case 'I':
				err = cn_printf(cn, "%d",
					      task_pid_nr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%u",
						from_kuid(&init_user_ns,
							  cred->uid));
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%u",
						from_kgid(&init_user_ns,
							  cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				time64_t time;

				time = ktime_get_real_seconds();
				err = cn_printf(cn, "%lld", time);
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				err = cn_esc_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				break;
			/* executable, could be changed by prctl PR_SET_NAME etc */
			case 'e':
				err = cn_esc_printf(cn, "%s", current->comm);
				break;
			/* file name of executable */
			case 'f':
				err = cn_print_exe_file(cn, true);
				break;
			case 'E':
				err = cn_print_exe_file(cn, false);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

out:
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
	return ispipe;
}

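/*
 * zap_process - mark @start's signal_struct as coredumping and send
 * SIGKILL to every other thread in the group that still has a mm.
 * Returns the number of threads woken, i.e. how many must reach the
 * coredump rendezvous before the dump can proceed.
 */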
static int zap_process(struct task_struct *start, int exit_code, int flags)
{
	struct task_struct *t;
	int nr = 0;

	/* ignore all signals except SIGKILL, see prepare_signal() */
	start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	for_each_thread(start, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	}

	return nr;
}

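/*
 * zap_threads - kill every task that shares @mm, including CLONE_VM
 * tasks in other thread groups, and publish @core_state so they park in
 * exit_mm(). Returns the number of threads to wait for, or -EAGAIN if a
 * group exit or another coredump already started.
 */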
static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_task = tsk;
		nr = zap_process(tsk, exit_code, 0);
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags |= PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_lock. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_lock
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;

		for_each_thread(g, p) {
			if (unlikely(!p->mm))
				continue;
			if (unlikely(p->mm == mm)) {
				lock_task_sighand(p, &flags);
				nr += zap_process(p, exit_code,
							SIGNAL_GROUP_EXIT);
				unlock_task_sighand(p, &flags);
			}
			break;
		}
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

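/*
 * coredump_wait - quiesce all users of current->mm: kill them, wait for
 * the core_state->startup completion, then wait until each thread is
 * inactive so its register state has been saved to memory.
 */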
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	mmap_write_unlock(mm);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		freezer_do_not_count();
		wait_for_completion(&core_state->startup);
		freezer_count();
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

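/*
 * coredump_finish - wake up the threads parked in exit_mm() and clear
 * mm->core_state. When the dump succeeded, the 0x80 bit (WCOREDUMP) is
 * set in group_exit_code.
 */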
static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	spin_lock_irq(&current->sighand->siglock);
	if (core_dumped && !__fatal_signal_pending(current))
		current->signal->group_exit_code |= 0x80;
	current->signal->group_exit_task = NULL;
	current->signal->flags = SIGNAL_GROUP_EXIT;
	spin_unlock_irq(&current->sighand->siglock);

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
	/*
	 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
	 * can do try_to_freeze() and check __fatal_signal_pending(),
	 * but then we need to teach dump_write() to restart and clear
	 * TIF_SIGPENDING.
	 */
	return fatal_signal_pending(current) || freezing(current);
}

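/*
 * wait_for_dump_helpers - with core_pipe_limit set, pose as a reader on
 * the dump pipe and wait until the usermode helper closes its end
 * (pipe->readers == 1), so the crashed process is not reaped while the
 * helper may still be inspecting it.
 */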
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe = file->private_data;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;
	wake_up_interruptible_sync(&pipe->rd_wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	pipe_unlock(pipe);

	/*
	 * We actually want wait_event_freezable() but then we need
	 * to clear TIF_SIGPENDING and improve dump_interrupted().
	 */
	wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

	pipe_lock(pipe);
	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or
 * PTR_ERR on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}

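/*
 * do_coredump - top-level entry, called from signal delivery with the
 * siginfo of the fatal signal. Quiesces all users of the mm, expands
 * core_pattern into a filename or helper argv, opens the target file or
 * spawns the pipe helper, and hands off to binfmt->core_dump().
 */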
void do_coredump(const kernel_siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int ispipe;
	size_t *argv = NULL;
	int argc = 0;
	/* require nonrelative corefile path and be extra careful */
	bool need_suid_safe = false;
	bool core_dumped = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since this flag is not protected
		 * by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_suid_safe = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm, &argv, &argc);

	if (ispipe) {
		int argi;
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value, this is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIM_CORE = !1, but it runs as root, and can do
			 * lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
					    GFP_KERNEL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}
		for (argi = 0; argi < argc; argi++)
			helper_argv[argi] = cn.corename + argv[argi];
		helper_argv[argi] = NULL;

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		kfree(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to |%s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct user_namespace *mnt_userns;
		struct inode *inode;
		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | O_EXCL;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_suid_safe && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/*
		 * Unlink the file if it exists unless this is a SUID
		 * binary - in that case, we're running around with root
		 * privs and don't want to unlink another user's coredump.
		 */
		if (!need_suid_safe) {
			/*
			 * If it doesn't exist, that's fine. If there's some
			 * other problem, we'll catch it at the filp_open().
			 */
			do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
		}

		/*
		 * There is a race between unlinking and creating the
		 * file, but if that causes an EEXIST here, that's
		 * fine - another process raced with us while creating
		 * the corefile, and the other process won. To userspace,
		 * what matters is that at least one of the two processes
		 * writes its coredump successfully, not which one.
		 */
		if (need_suid_safe) {
			/*
			 * Using user namespaces, normal user tasks can change
			 * their current->fs->root to point to arbitrary
			 * directories. Since the intention of the "only dump
			 * with a fully qualified path" rule is to control where
			 * coredumps may be placed using root privileges,
			 * current->fs->root must not be used. Instead, use the
			 * root directory of init_task.
			 */
			struct path root;

			task_lock(&init_task);
			get_fs_root(init_task.fs, &root);
			task_unlock(&init_task);
			cprm.file = file_open_root(&root, cn.corename,
						   open_flags, 0600);
			path_put(&root);
		} else {
			cprm.file = filp_open(cn.corename, open_flags, 0600);
		}
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't dump core if the filesystem changed owner or mode
		 * of the file during file creation. This is an issue when
		 * a process dumps core while its cwd is e.g. on a vfat
		 * filesystem.
		 */
		mnt_userns = file_mnt_user_ns(cprm.file);
		if (!uid_eq(i_uid_into_mnt(mnt_userns, inode), current_fsuid()))
			goto close_fail;
		if ((inode->i_mode & 0677) != 0600)
			goto close_fail;
		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
			goto close_fail;
		if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
				0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	/* The cell spufs coredump code reads the file descriptor tables */
	retval = unshare_files();
	if (retval)
		goto close_fail;
	if (!dump_interrupted()) {
		/*
		 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
		 * have this set to NULL.
		 */
		if (!cprm.file) {
			pr_info("Core dump to |%s disabled\n", cn.corename);
			goto close_fail;
		}
		file_start_write(cprm.file);
		core_dumped = binfmt->core_dump(&cprm);
		/*
		 * Ensures that file size is big enough to contain the current
		 * file position. This prevents gdb from complaining about
		 * a truncated file if the last "write" to the file was
		 * dump_skip.
		 */
		if (cprm.to_skip) {
			cprm.to_skip--;
			dump_emit(&cprm, "", 1);
		}
		file_end_write(cprm.file);
	}
	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(argv);
	kfree(cn.corename);
	coredump_finish(mm, core_dumped);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
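/*
 * __dump_emit - write @nr bytes at the current file position, honouring
 * RLIMIT_CORE and bailing out if the dump was interrupted. Returns 1 on
 * success and 0 on any failure, as do the helpers below.
 */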
static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	struct file *file = cprm->file;
	loff_t pos = file->f_pos;
	ssize_t n;

	if (cprm->written + nr > cprm->limit)
		return 0;

	if (dump_interrupted())
		return 0;
	n = __kernel_write(file, addr, nr, &pos);
	if (n != nr)
		return 0;
	file->f_pos = pos;
	cprm->written += n;
	cprm->pos += n;

	return 1;
}

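/*
 * __dump_skip - advance the file position by @nr bytes: seek when the
 * file supports it (leaving a sparse hole), otherwise write that many
 * zero bytes (e.g. when dumping to a pipe).
 */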
static int __dump_skip(struct coredump_params *cprm, size_t nr)
{
	static char zeroes[PAGE_SIZE];
	struct file *file = cprm->file;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (dump_interrupted() ||
		    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
			return 0;
		cprm->pos += nr;
		return 1;
	} else {
		while (nr > PAGE_SIZE) {
			if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
				return 0;
			nr -= PAGE_SIZE;
		}
		return __dump_emit(cprm, zeroes, nr);
	}
}

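/*
 * dump_emit - flush any skip deferred in cprm->to_skip, then write the
 * payload. Skips are accumulated in ->to_skip rather than applied
 * immediately so that consecutive dump_skip() calls are handled in one
 * step here.
 */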
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
	if (cprm->to_skip) {
		if (!__dump_skip(cprm, cprm->to_skip))
			return 0;
		cprm->to_skip = 0;
	}
	return __dump_emit(cprm, addr, nr);
}
EXPORT_SYMBOL(dump_emit);

void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
{
	cprm->to_skip = pos - cprm->pos;
}
EXPORT_SYMBOL(dump_skip_to);

void dump_skip(struct coredump_params *cprm, size_t nr)
{
	cprm->to_skip += nr;
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
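/*
 * dump_user_range - dump [start, start + len) of current's user memory,
 * one page at a time; pages that were never faulted in are skipped so
 * the core file can stay sparse.
 */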
int dump_user_range(struct coredump_params *cprm, unsigned long start,
		    unsigned long len)
{
	unsigned long addr;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page;
		int stop;

		/*
		 * To avoid having to allocate page tables for virtual address
		 * ranges that have never been used yet, and also to make it
		 * easy to generate sparse core files, use a helper that returns
		 * NULL when encountering an empty page table entry that would
		 * otherwise have been filled with the zero page.
		 */
		page = get_dump_page(addr);
		if (page) {
			void *kaddr = kmap_local_page(page);

			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
			kunmap_local(kaddr);
			put_page(page);
			if (stop)
				return 0;
		} else {
			dump_skip(cprm, PAGE_SIZE);
		}
	}
	return 1;
}
#endif

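/*
 * dump_align - round the (virtual) file position up to @align, which
 * must be a power of two; the padding is deferred via cprm->to_skip.
 */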
int dump_align(struct coredump_params *cprm, int align)
{
	unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);

	if (align & (align - 1))
		return 0;
	if (mod)
		cprm->to_skip += align - mod;
	return 1;
}
EXPORT_SYMBOL(dump_align);

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (is_vm_hugetlb_page(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
			FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to. */
	if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this is the beginning of an executable file mapping,
	 * dump the first page to aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ) &&
	    (READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
		return PAGE_SIZE;

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

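/*
 * first_vma - start of the dump walk: the task's first real mapping, or
 * the gate vma if there are none.
 */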
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the
 * task's VMAs.
 */
int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
		      struct core_vma_metadata **vma_meta,
		      size_t *vma_data_size_ptr)
{
	struct vm_area_struct *vma, *gate_vma;
	struct mm_struct *mm = current->mm;
	int i;
	size_t vma_data_size = 0;

	/*
	 * Once the stack expansion code is fixed to not change VMA bounds
	 * under mmap_lock in read mode, this can be changed to take the
	 * mmap_lock in read mode.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	gate_vma = get_gate_vma(mm);
	*vma_count = mm->map_count + (gate_vma ? 1 : 0);

	*vma_meta = kvmalloc_array(*vma_count, sizeof(**vma_meta), GFP_KERNEL);
	if (!*vma_meta) {
		mmap_write_unlock(mm);
		return -ENOMEM;
	}

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma), i++) {
		struct core_vma_metadata *m = (*vma_meta) + i;

		m->start = vma->vm_start;
		m->end = vma->vm_end;
		m->flags = vma->vm_flags;
		m->dump_size = vma_dump_size(vma, cprm->mm_flags);

		vma_data_size += m->dump_size;
	}

	mmap_write_unlock(mm);

	if (WARN_ON(i != *vma_count))
		return -EFAULT;

	*vma_data_size_ptr = vma_data_size;
	return 0;
}