/*
 * linux/fs/exec.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
int suid_dumpable = 0;

struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
	BUG_ON(!fmt);
	write_lock(&binfmt_lock);
	insert ? list_add(&fmt->lh, &formats) :
		 list_add_tail(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable
 * for security reasons.
 *
 * Also note that we take the address to load from from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);
	static const struct open_flags uselib_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	if (IS_ERR(tmp))
		goto out;

	file = do_filp_open(AT_FDCWD, tmp, &uselib_flags, LOOKUP_FOLLOW);
	putname(tmp);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	error = -EINVAL;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
exit:
	fput(file);
out:
	return error;
}

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
	struct mm_struct *mm = current->mm;
	long diff = (long)(pages - bprm->vma_pages);

	if (!mm || !diff)
		return;

	bprm->vma_pages = pages;
	add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		acct_arg_size(bprm, size / PAGE_SIZE);

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
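		/*
		 * Illustrative example (numbers hypothetical, not from the
		 * code): with the common 8 MiB RLIMIT_STACK soft limit, the
		 * check below allows at most 8 MiB / 4 = 2 MiB of argument
		 * plus environment strings once the 32-page ARG_MAX
		 * allowance above has been exceeded.
		 */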
		rlim = current->signal->rlim;
		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
	bool is_compat;
#endif
	union {
		const char __user *const __user *native;
#ifdef CONFIG_COMPAT
		compat_uptr_t __user *compat;
#endif
	} ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
	const char __user *native;

#ifdef CONFIG_COMPAT
	if (unlikely(argv.is_compat)) {
		compat_uptr_t compat;

		if (get_user(compat, argv.ptr.compat + nr))
			return ERR_PTR(-EFAULT);

		return compat_ptr(compat);
	}
#endif

	if (get_user(native, argv.ptr.native + nr))
		return ERR_PTR(-EFAULT);

	return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
	int i = 0;

	if (argv.ptr.native != NULL) {
		for (;;) {
			const char __user *p = get_user_arg_ptr(argv, i);

			if (!p)
				break;

			if (IS_ERR(p))
				return -EFAULT;

			if (i++ >= max)
				return -E2BIG;

			if (fatal_signal_pending(current))
				return -ERESTARTNOHAND;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		const char __user *str;
		int len;
		unsigned long pos;

		ret = -EFAULT;
		str = get_user_arg_ptr(argv, argc);
		if (IS_ERR(str))
			goto out;

		len = strnlen_user(str, MAX_ARG_STRLEN);
		if (!len)
			goto out;

		ret = -E2BIG;
		if (!valid_arg_len(bprm, len))
			goto out;

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			if (fatal_signal_pending(current)) {
				ret = -ERESTARTNOHAND;
				goto out;
			}
			cond_resched();

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
			struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	struct user_arg_ptr argv = {
		.ptr.native = (const char __user *const __user *)__argv,
	};

	set_fs(KERNEL_DS);
	r = copy_strings(argc, argv, bprm);
	set_fs(oldfs);

	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
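/*
 * Worked example (numbers hypothetical): the single-page argument stack
 * built at [STACK_TOP_MAX - PAGE_SIZE, STACK_TOP_MAX) that must end up
 * 2 MiB lower arrives here with shift == 2 MiB; new_start and new_end
 * below are then simply old_start and old_end minus that shift.
 */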
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(&tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start) some architectures
		 * have constraints on va-space that make this illegal (IA64) -
		 * for the others its just a little faster.
		 */
		free_pgd_range(&tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(&tlb, new_end, old_end);

	/*
	 * Shrink the vma to just the new range.  Always succeeds.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;
	unsigned long stack_size;
	unsigned long stack_expand;
	unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);

	if (unlikely(stack_top < mmap_min_addr) ||
	    unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
		return -ENOMEM;

	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;
	vm_flags |= VM_STACK_INCOMPLETE_SETUP;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret)
			goto out_unlock;
	}

	/* mprotect_fixup is overkill to remove the temporary stack flags */
	vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

	stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
	stack_size = vma->vm_end - vma->vm_start;
	/*
	 * Align this down to a page boundary as expand_stack
	 * will align it up.
	 */
	rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_start + rlim_stack;
	else
		stack_base = vma->vm_end + stack_expand;
#else
	if (stack_size + stack_expand > rlim_stack)
		stack_base = vma->vm_end - rlim_stack;
	else
		stack_base = vma->vm_start - stack_expand;
#endif
	current->mm->start_stack = bprm->p;
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct file *file;
	int err;
	static const struct open_flags open_exec_flags = {
		.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
		.acc_mode = MAY_EXEC | MAY_OPEN,
		.intent = LOOKUP_OPEN
	};

	file = do_filp_open(AT_FDCWD, name, &open_exec_flags, LOOKUP_FOLLOW);
	if (IS_ERR(file))
		goto out;

	err = -EACCES;
	if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
		goto exit;

	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	fsnotify_open(file);

	err = deny_write_access(file);
	if (err)
		goto exit;

out:
	return file;

exit:
	fput(file);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
		char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);

static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		sync_mm_rss(old_mm);
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}

	sig->group_exit_task = tsk;
	sig->notify_count = zap_other_threads(tsk);
	if (!thread_group_leader(tsk))
		sig->notify_count--;

	while (sig->notify_count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 *       is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);

		list_replace_rcu(&leader->tasks, &tsk->tasks);
		list_replace_init(&leader->sibling, &tsk->sibling);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;
		leader->exit_signal = -1;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;

		/*
		 * We are going to release_task()->ptrace_unlink() silently,
		 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
		 * the tracer won't block again waiting for this thread.
		 */
		if (unlikely(leader->ptrace))
			__wake_up_parent(leader, leader->parent);
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	/* we have changed execution domain */
	tsk->exit_signal = SIGCHLD;

	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[j];
		if (!set)
			continue;
		fdt->close_on_exec[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);

	}
	spin_unlock(&files->file_lock);
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);

	trace_task_rename(tsk, buf);

	/*
	 * Threads may access current->comm without holding
	 * the task lock, so write the string carefully.
	 * Readers without a lock may see incomplete new
	 * names but are safe from non-terminating string reads.
	 */
	memset(tsk->comm, 0, TASK_COMM_LEN);
	wmb();
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk);
}

static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
	int i, ch;

	/* Copies the binary name from after the last slash */
	for (i = 0; (ch = *(fn++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < len - 1)
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
}

int flush_old_exec(struct linux_binprm * bprm)
{
	int retval;

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
	/*
	 * Release all of the old mmap stuff
	 */
	acct_arg_size(bprm, 0);
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	set_fs(USER_DS);
	current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD);
	flush_thread();
	current->personality &= ~bprm->per_clear;

	return 0;

out:
	return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
	if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0)
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
	arch_pick_mmap_layout(current->mm);

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	set_task_comm(current, bprm->tcomm);

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (!uid_eq(bprm->cred->uid, current_euid()) ||
	    !gid_eq(bprm->cred->gid, current_egid())) {
		current->pdeath_signal = 0;
	} else {
		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}

	/*
	 * Flush performance counters when crossing a
	 * security domain:
	 */
	if (!get_dumpable(current->mm))
		perf_event_exit_task(current);

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before that, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
	if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
		return -ERESTARTNOINTR;

	bprm->cred = prepare_exec_creds();
	if (likely(bprm->cred))
		return 0;

	mutex_unlock(&current->signal->cred_guard_mutex);
	return -ENOMEM;
}

void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred) {
		mutex_unlock(&current->signal->cred_guard_mutex);
		abort_creds(bprm->cred);
	}
	kfree(bprm);
}

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;
	/*
	 * cred_guard_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked.
	 */
	security_bprm_committed_creds(bprm);
	mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH
 */
static int check_unsafe_exec(struct linux_binprm *bprm)
{
	struct task_struct *p = current, *t;
	unsigned n_fs;
	int res = 0;

	if (p->ptrace) {
		if (p->ptrace & PT_PTRACE_CAP)
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
	}

	/*
	 * This isn't strictly necessary, but it makes it harder for LSMs to
	 * mess up.
	 */
	if (current->no_new_privs)
		bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

	n_fs = 1;
	spin_lock(&p->fs->lock);
	rcu_read_lock();
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
	}
	rcu_read_unlock();

	if (p->fs->users > n_fs) {
		bprm->unsafe |= LSM_UNSAFE_SHARE;
	} else {
		res = -EAGAIN;
		if (!p->fs->in_exec) {
			p->fs->in_exec = 1;
			res = 1;
		}
	}
	spin_unlock(&p->fs->lock);

	return res;
}

1273/*
1274 * Fill the binprm structure from the inode.
1275 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
a6f76f23
DH
1276 *
1277 * This may be called multiple times for binary chains (scripts for example).
1da177e4
LT
1278 */
1279int prepare_binprm(struct linux_binprm *bprm)
1280{
a6f76f23 1281 umode_t mode;
0f7fc9e4 1282 struct inode * inode = bprm->file->f_path.dentry->d_inode;
1da177e4
LT
1283 int retval;
1284
1285 mode = inode->i_mode;
1da177e4
LT
1286 if (bprm->file->f_op == NULL)
1287 return -EACCES;
1288
a6f76f23
DH
1289 /* clear any previous set[ug]id data from a previous binary */
1290 bprm->cred->euid = current_euid();
1291 bprm->cred->egid = current_egid();
1da177e4 1292
259e5e6c
AL
1293 if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
1294 !current->no_new_privs) {
1da177e4
LT
1295 /* Set-uid? */
1296 if (mode & S_ISUID) {
9e4a36ec
EB
1297 if (!kuid_has_mapping(bprm->cred->user_ns, inode->i_uid))
1298 return -EPERM;
a6f76f23
DH
1299 bprm->per_clear |= PER_CLEAR_ON_SETID;
1300 bprm->cred->euid = inode->i_uid;
9e4a36ec 1301
1da177e4
LT
1302 }
1303
1304 /* Set-gid? */
1305 /*
1306 * If setgid is set but no group execute bit then this
1307 * is a candidate for mandatory locking, not a setgid
1308 * executable.
1309 */
1310 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
9e4a36ec
EB
1311 if (!kgid_has_mapping(bprm->cred->user_ns, inode->i_gid))
1312 return -EPERM;
a6f76f23
DH
1313 bprm->per_clear |= PER_CLEAR_ON_SETID;
1314 bprm->cred->egid = inode->i_gid;
1da177e4
LT
1315 }
1316 }
1317
1318 /* fill in binprm security blob */
a6f76f23 1319 retval = security_bprm_set_creds(bprm);
1da177e4
LT
1320 if (retval)
1321 return retval;
a6f76f23 1322 bprm->cred_prepared = 1;
1da177e4 1323
a6f76f23
DH
1324 memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1325 return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
1da177e4
LT
1326}
1327
1328EXPORT_SYMBOL(prepare_binprm);
1329
4fc75ff4
NP
1330/*
1331 * Arguments are '\0' separated strings found at the location bprm->p
1332 * points to; chop off the first by relocating brpm->p to right after
1333 * the first '\0' encountered.
1334 */
b6a2fea3 1335int remove_arg_zero(struct linux_binprm *bprm)
1da177e4 1336{
b6a2fea3
OW
1337 int ret = 0;
1338 unsigned long offset;
1339 char *kaddr;
1340 struct page *page;
4fc75ff4 1341
b6a2fea3
OW
1342 if (!bprm->argc)
1343 return 0;
1da177e4 1344
b6a2fea3
OW
1345 do {
1346 offset = bprm->p & ~PAGE_MASK;
1347 page = get_arg_page(bprm, bprm->p, 0);
1348 if (!page) {
1349 ret = -EFAULT;
1350 goto out;
1351 }
e8e3c3d6 1352 kaddr = kmap_atomic(page);
4fc75ff4 1353
b6a2fea3
OW
1354 for (; offset < PAGE_SIZE && kaddr[offset];
1355 offset++, bprm->p++)
1356 ;
4fc75ff4 1357
e8e3c3d6 1358 kunmap_atomic(kaddr);
b6a2fea3 1359 put_arg_page(page);
4fc75ff4 1360
b6a2fea3
OW
1361 if (offset == PAGE_SIZE)
1362 free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
1363 } while (offset == PAGE_SIZE);
4fc75ff4 1364
b6a2fea3
OW
1365 bprm->p++;
1366 bprm->argc--;
1367 ret = 0;
4fc75ff4 1368
b6a2fea3
OW
1369out:
1370 return ret;
1da177e4 1371}
1da177e4
LT
1372EXPORT_SYMBOL(remove_arg_zero);
1373
1374/*
1375 * cycle the list of binary formats handler, until one recognizes the image
1376 */
1377int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1378{
85f33466 1379 unsigned int depth = bprm->recursion_depth;
1da177e4
LT
1380 int try,retval;
1381 struct linux_binfmt *fmt;
6308191f 1382 pid_t old_pid, old_vpid;
1da177e4 1383
1da177e4
LT
1384 retval = security_bprm_check(bprm);
1385 if (retval)
1386 return retval;
1387
473ae30b
AV
1388 retval = audit_bprm(bprm);
1389 if (retval)
1390 return retval;
1391
bb188d7e 1392 /* Need to fetch pid before load_binary changes it */
6308191f 1393 old_pid = current->pid;
bb188d7e 1394 rcu_read_lock();
6308191f 1395 old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
bb188d7e
DV
1396 rcu_read_unlock();
1397
1da177e4
LT
1398 retval = -ENOENT;
1399 for (try=0; try<2; try++) {
1400 read_lock(&binfmt_lock);
e4dc1b14 1401 list_for_each_entry(fmt, &formats, lh) {
1da177e4
LT
1402 int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
1403 if (!fn)
1404 continue;
1405 if (!try_module_get(fmt->module))
1406 continue;
1407 read_unlock(&binfmt_lock);
1408 retval = fn(bprm, regs);
85f33466
RM
1409 /*
1410 * Restore the depth counter to its starting value
1411 * in this call, so we don't have to rely on every
1412 * load_binary function to restore it on return.
1413 */
1414 bprm->recursion_depth = depth;
1da177e4 1415 if (retval >= 0) {
4ff16c25
DS
1416 if (depth == 0) {
1417 trace_sched_process_exec(current, old_pid, bprm);
6308191f 1418 ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
4ff16c25 1419 }
1da177e4
LT
1420 put_binfmt(fmt);
1421 allow_write_access(bprm->file);
1422 if (bprm->file)
1423 fput(bprm->file);
1424 bprm->file = NULL;
1425 current->did_exec = 1;
9f46080c 1426 proc_exec_connector(current);
1da177e4
LT
1427 return retval;
1428 }
1429 read_lock(&binfmt_lock);
1430 put_binfmt(fmt);
1431 if (retval != -ENOEXEC || bprm->mm == NULL)
1432 break;
1433 if (!bprm->file) {
1434 read_unlock(&binfmt_lock);
1435 return retval;
1436 }
1437 }
1438 read_unlock(&binfmt_lock);
b4edf8bd 1439#ifdef CONFIG_MODULES
1da177e4
LT
1440 if (retval != -ENOEXEC || bprm->mm == NULL) {
1441 break;
5f4123be 1442 } else {
1da177e4
LT
1443#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1444 if (printable(bprm->buf[0]) &&
1445 printable(bprm->buf[1]) &&
1446 printable(bprm->buf[2]) &&
1447 printable(bprm->buf[3]))
1448 break; /* -ENOEXEC */
91219352
TH
1449 if (try)
1450 break; /* -ENOEXEC */
1da177e4 1451 request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
1da177e4 1452 }
b4edf8bd
TH
1453#else
1454 break;
1455#endif
1da177e4
LT
1456 }
1457 return retval;
1458}
1459
1460EXPORT_SYMBOL(search_binary_handler);
1461
1462/*
1463 * sys_execve() executes a new program.
1464 */
ba2d0162
ON
1465static int do_execve_common(const char *filename,
1466 struct user_arg_ptr argv,
1467 struct user_arg_ptr envp,
1468 struct pt_regs *regs)
1da177e4
LT
1469{
1470 struct linux_binprm *bprm;
1471 struct file *file;
3b125388 1472 struct files_struct *displaced;
8c652f96 1473 bool clear_in_exec;
1da177e4 1474 int retval;
72fa5997
VK
1475 const struct cred *cred = current_cred();
1476
1477 /*
1478 * We move the actual failure in case of RLIMIT_NPROC excess from
1479 * set*uid() to execve() because too many poorly written programs
1480 * don't check setuid() return code. Here we additionally recheck
1481 * whether NPROC limit is still exceeded.
1482 */
1483 if ((current->flags & PF_NPROC_EXCEEDED) &&
1484 atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) {
1485 retval = -EAGAIN;
1486 goto out_ret;
1487 }
1488
1489 /* We're below the limit (still or again), so we don't want to make
1490 * further execve() calls fail. */
1491 current->flags &= ~PF_NPROC_EXCEEDED;
1da177e4 1492
3b125388 1493 retval = unshare_files(&displaced);
fd8328be
AV
1494 if (retval)
1495 goto out_ret;
1496
1da177e4 1497 retval = -ENOMEM;
11b0b5ab 1498 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1da177e4 1499 if (!bprm)
fd8328be 1500 goto out_files;
1da177e4 1501
a2a8474c
ON
1502 retval = prepare_bprm_creds(bprm);
1503 if (retval)
a6f76f23 1504 goto out_free;
498052bb
AV
1505
1506 retval = check_unsafe_exec(bprm);
8c652f96 1507 if (retval < 0)
a2a8474c 1508 goto out_free;
8c652f96 1509 clear_in_exec = retval;
a2a8474c 1510 current->in_execve = 1;
a6f76f23 1511
1da177e4
LT
1512 file = open_exec(filename);
1513 retval = PTR_ERR(file);
1514 if (IS_ERR(file))
498052bb 1515 goto out_unmark;
1da177e4
LT
1516
1517 sched_exec();
1518
1da177e4
LT
1519 bprm->file = file;
1520 bprm->filename = filename;
1521 bprm->interp = filename;
1da177e4 1522
b6a2fea3
OW
1523 retval = bprm_mm_init(bprm);
1524 if (retval)
1525 goto out_file;
1da177e4 1526
b6a2fea3 1527 bprm->argc = count(argv, MAX_ARG_STRINGS);
1da177e4 1528 if ((retval = bprm->argc) < 0)
a6f76f23 1529 goto out;
1da177e4 1530
b6a2fea3 1531 bprm->envc = count(envp, MAX_ARG_STRINGS);
1da177e4 1532 if ((retval = bprm->envc) < 0)
1da177e4
LT
1533 goto out;
1534
1535 retval = prepare_binprm(bprm);
1536 if (retval < 0)
1537 goto out;
1538
1539 retval = copy_strings_kernel(1, &bprm->filename, bprm);
1540 if (retval < 0)
1541 goto out;
1542
1543 bprm->exec = bprm->p;
1544 retval = copy_strings(bprm->envc, envp, bprm);
1545 if (retval < 0)
1546 goto out;
1547
1548 retval = copy_strings(bprm->argc, argv, bprm);
1549 if (retval < 0)
1550 goto out;
1551
1552 retval = search_binary_handler(bprm,regs);
a6f76f23
DH
1553 if (retval < 0)
1554 goto out;
1da177e4 1555
a6f76f23 1556 /* execve succeeded */
498052bb 1557 current->fs->in_exec = 0;
f9ce1f1c 1558 current->in_execve = 0;
a6f76f23
DH
1559 acct_update_integrals(current);
1560 free_bprm(bprm);
1561 if (displaced)
1562 put_files_struct(displaced);
1563 return retval;
1da177e4 1564
a6f76f23 1565out:
3c77f845
ON
1566 if (bprm->mm) {
1567 acct_arg_size(bprm, 0);
1568 mmput(bprm->mm);
1569 }
1da177e4
LT
1570
1571out_file:
1572 if (bprm->file) {
1573 allow_write_access(bprm->file);
1574 fput(bprm->file);
1575 }
a6f76f23 1576
498052bb 1577out_unmark:
8c652f96
ON
1578 if (clear_in_exec)
1579 current->fs->in_exec = 0;
f9ce1f1c 1580 current->in_execve = 0;
a6f76f23
DH
1581
1582out_free:
08a6fac1 1583 free_bprm(bprm);
1da177e4 1584
fd8328be 1585out_files:
3b125388
AV
1586 if (displaced)
1587 reset_files_struct(displaced);
1da177e4
LT
1588out_ret:
1589 return retval;
1590}
1591
ba2d0162
ON
1592int do_execve(const char *filename,
1593 const char __user *const __user *__argv,
1594 const char __user *const __user *__envp,
1595 struct pt_regs *regs)
1596{
0e028465
ON
1597 struct user_arg_ptr argv = { .ptr.native = __argv };
1598 struct user_arg_ptr envp = { .ptr.native = __envp };
1599 return do_execve_common(filename, argv, envp, regs);
1600}
1601
1602#ifdef CONFIG_COMPAT
1603int compat_do_execve(char *filename,
1604 compat_uptr_t __user *__argv,
1605 compat_uptr_t __user *__envp,
1606 struct pt_regs *regs)
1607{
1608 struct user_arg_ptr argv = {
1609 .is_compat = true,
1610 .ptr.compat = __argv,
1611 };
1612 struct user_arg_ptr envp = {
1613 .is_compat = true,
1614 .ptr.compat = __envp,
1615 };
ba2d0162
ON
1616 return do_execve_common(filename, argv, envp, regs);
1617}
0e028465 1618#endif
ba2d0162 1619
964ee7df 1620void set_binfmt(struct linux_binfmt *new)
1da177e4 1621{
801460d0
HS
1622 struct mm_struct *mm = current->mm;
1623
1624 if (mm->binfmt)
1625 module_put(mm->binfmt->module);
1da177e4 1626
801460d0 1627 mm->binfmt = new;
964ee7df
ON
1628 if (new)
1629 __module_get(new->module);
1da177e4
LT
1630}
1631
1632EXPORT_SYMBOL(set_binfmt);
1633
1b0d300b
XF
1634static int expand_corename(struct core_name *cn)
1635{
1636 char *old_corename = cn->corename;
1637
1638 cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
1639 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
1640
1641 if (!cn->corename) {
1642 kfree(old_corename);
1643 return -ENOMEM;
1644 }
1645
1646 return 0;
1647}
1648
1649static int cn_printf(struct core_name *cn, const char *fmt, ...)
1650{
1651 char *cur;
1652 int need;
1653 int ret;
1654 va_list arg;
1655
1656 va_start(arg, fmt);
1657 need = vsnprintf(NULL, 0, fmt, arg);
1658 va_end(arg);
1659
1660 if (likely(need < cn->size - cn->used - 1))
1661 goto out_printf;
1662
1663 ret = expand_corename(cn);
1664 if (ret)
1665 goto expand_fail;
1666
1667out_printf:
1668 cur = cn->corename + cn->used;
1669 va_start(arg, fmt);
1670 vsnprintf(cur, need + 1, fmt, arg);
1671 va_end(arg);
1672 cn->used += need;
1673 return 0;
1674
1675expand_fail:
1676 return ret;
1677}
1678
2c563731
JS
1679static void cn_escape(char *str)
1680{
1681 for (; *str; str++)
1682 if (*str == '/')
1683 *str = '!';
1684}
1685
57cc083a
JS
1686static int cn_print_exe_file(struct core_name *cn)
1687{
1688 struct file *exe_file;
2c563731 1689 char *pathbuf, *path;
57cc083a
JS
1690 int ret;
1691
1692 exe_file = get_mm_exe_file(current->mm);
2c563731
JS
1693 if (!exe_file) {
1694 char *commstart = cn->corename + cn->used;
1695 ret = cn_printf(cn, "%s (path unknown)", current->comm);
1696 cn_escape(commstart);
1697 return ret;
1698 }
57cc083a
JS
1699
1700 pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
1701 if (!pathbuf) {
1702 ret = -ENOMEM;
1703 goto put_exe_file;
1704 }
1705
1706 path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
1707 if (IS_ERR(path)) {
1708 ret = PTR_ERR(path);
1709 goto free_buf;
1710 }
1711
2c563731 1712 cn_escape(path);
57cc083a
JS
1713
1714 ret = cn_printf(cn, "%s", path);
1715
1716free_buf:
1717 kfree(pathbuf);
1718put_exe_file:
1719 fput(exe_file);
1720 return ret;
1721}
1722
1da177e4
LT
1723/* format_corename will inspect the pattern parameter, and output a
1724 * name into corename, which must have space for at least
1725 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
1726 */
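/*
 * For example (values hypothetical): with core_pattern set to "core.%e.%p",
 * a dump of a task whose comm is "myapp" and whose tgid is 1234 is named
 * "core.myapp.1234". A leading '|' in core_pattern marks a pipe helper and
 * makes this function return 1 (ispipe).
 */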
1b0d300b 1727static int format_corename(struct core_name *cn, long signr)
1da177e4 1728{
86a264ab 1729 const struct cred *cred = current_cred();
565b9b14
ON
1730 const char *pat_ptr = core_pattern;
1731 int ispipe = (*pat_ptr == '|');
1da177e4 1732 int pid_in_pattern = 0;
1b0d300b
XF
1733 int err = 0;
1734
1735 cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
1736 cn->corename = kmalloc(cn->size, GFP_KERNEL);
1737 cn->used = 0;
1738
1739 if (!cn->corename)
1740 return -ENOMEM;
1da177e4
LT
1741
1742 /* Repeat as long as we have more pattern to process and more output
1743 space */
1744 while (*pat_ptr) {
1745 if (*pat_ptr != '%') {
1b0d300b 1746 if (*pat_ptr == 0)
1da177e4 1747 goto out;
1b0d300b 1748 err = cn_printf(cn, "%c", *pat_ptr++);
1da177e4
LT
1749 } else {
1750 switch (*++pat_ptr) {
1b0d300b 1751 /* single % at the end, drop that */
1da177e4
LT
1752 case 0:
1753 goto out;
1754 /* Double percent, output one percent */
1755 case '%':
1b0d300b 1756 err = cn_printf(cn, "%c", '%');
1da177e4
LT
1757 break;
1758 /* pid */
1759 case 'p':
1760 pid_in_pattern = 1;
1b0d300b
XF
1761 err = cn_printf(cn, "%d",
1762 task_tgid_vnr(current));
1da177e4
LT
1763 break;
1764 /* uid */
1765 case 'u':
1b0d300b 1766 err = cn_printf(cn, "%d", cred->uid);
1da177e4
LT
1767 break;
1768 /* gid */
1769 case 'g':
1b0d300b 1770 err = cn_printf(cn, "%d", cred->gid);
1da177e4
LT
1771 break;
1772 /* signal that caused the coredump */
1773 case 's':
1b0d300b 1774 err = cn_printf(cn, "%ld", signr);
1da177e4
LT
1775 break;
1776 /* UNIX time of coredump */
1777 case 't': {
1778 struct timeval tv;
1779 do_gettimeofday(&tv);
1b0d300b 1780 err = cn_printf(cn, "%lu", tv.tv_sec);
1da177e4
LT
1781 break;
1782 }
1783 /* hostname */
2c563731
JS
1784 case 'h': {
1785 char *namestart = cn->corename + cn->used;
1da177e4 1786 down_read(&uts_sem);
1b0d300b
XF
1787 err = cn_printf(cn, "%s",
1788 utsname()->nodename);
1da177e4 1789 up_read(&uts_sem);
2c563731 1790 cn_escape(namestart);
1da177e4 1791 break;
2c563731 1792 }
1da177e4 1793 /* executable */
2c563731
JS
1794 case 'e': {
1795 char *commstart = cn->corename + cn->used;
1b0d300b 1796 err = cn_printf(cn, "%s", current->comm);
2c563731 1797 cn_escape(commstart);
1da177e4 1798 break;
2c563731 1799 }
57cc083a
JS
1800 case 'E':
1801 err = cn_print_exe_file(cn);
1802 break;
74aadce9
NH
1803 /* core limit size */
1804 case 'c':
1b0d300b
XF
1805 err = cn_printf(cn, "%lu",
1806 rlimit(RLIMIT_CORE));
74aadce9 1807 break;
1da177e4
LT
1808 default:
1809 break;
1810 }
1811 ++pat_ptr;
1812 }
1b0d300b
XF
1813
1814 if (err)
1815 return err;
1da177e4 1816 }
1b0d300b 1817
1da177e4
LT
1818 /* Backward compatibility with core_uses_pid:
1819 *
1820 * If core_pattern does not include a %p (as is the default)
1821 * and core_uses_pid is set, then .%pid will be appended to
c4bbafda 1822 * the filename. Do not do this for piped commands. */
6409324b 1823 if (!ispipe && !pid_in_pattern && core_uses_pid) {
1b0d300b
XF
1824 err = cn_printf(cn, ".%d", task_tgid_vnr(current));
1825 if (err)
1826 return err;
1da177e4 1827 }
c4bbafda 1828out:
c4bbafda 1829 return ispipe;
1da177e4
LT
1830}
1831
5c99cbf4 1832static int zap_process(struct task_struct *start, int exit_code)
aceecc04
ON
1833{
1834 struct task_struct *t;
8cd9c249 1835 int nr = 0;
281de339 1836
d5f70c00 1837 start->signal->flags = SIGNAL_GROUP_EXIT;
5c99cbf4 1838 start->signal->group_exit_code = exit_code;
d5f70c00 1839 start->signal->group_stop_count = 0;
aceecc04
ON
1840
1841 t = start;
1842 do {
6dfca329 1843 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
aceecc04 1844 if (t != current && t->mm) {
281de339
ON
1845 sigaddset(&t->pending.signal, SIGKILL);
1846 signal_wake_up(t, 1);
8cd9c249 1847 nr++;
aceecc04 1848 }
e4901f92 1849 } while_each_thread(start, t);
8cd9c249
ON
1850
1851 return nr;
aceecc04
ON
1852}
1853
dcf560c5 1854static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
8cd9c249 1855 struct core_state *core_state, int exit_code)
1da177e4
LT
1856{
1857 struct task_struct *g, *p;
5debfa6d 1858 unsigned long flags;
8cd9c249 1859 int nr = -EAGAIN;
dcf560c5
ON
1860
1861 spin_lock_irq(&tsk->sighand->siglock);
ed5d2cac 1862 if (!signal_group_exit(tsk->signal)) {
8cd9c249 1863 mm->core_state = core_state;
5c99cbf4 1864 nr = zap_process(tsk, exit_code);
1da177e4 1865 }
dcf560c5 1866 spin_unlock_irq(&tsk->sighand->siglock);
8cd9c249
ON
1867 if (unlikely(nr < 0))
1868 return nr;
1da177e4 1869
8cd9c249 1870 if (atomic_read(&mm->mm_users) == nr + 1)
5debfa6d 1871 goto done;
e4901f92
ON
1872 /*
1873 * We should find and kill all tasks which use this mm, and we should
999d9fc1 1874 * count them correctly into ->nr_threads. We don't take tasklist
e4901f92
ON
1875 * lock, but this is safe wrt:
1876 *
1877 * fork:
1878 * None of sub-threads can fork after zap_process(leader). All
1879 * processes which were created before this point should be
1880 * visible to zap_threads() because copy_process() adds the new
1881 * process to the tail of init_task.tasks list, and lock/unlock
1882 * of ->siglock provides a memory barrier.
1883 *
1884 * do_exit:
1885 * The caller holds mm->mmap_sem. This means that the task which
1886 * uses this mm can't pass exit_mm(), so it can't exit or clear
1887 * its ->mm.
1888 *
1889 * de_thread:
1890 * It does list_replace_rcu(&leader->tasks, &current->tasks),
1891 * we must see either old or new leader, this does not matter.
1892 * However, it can change p->sighand, so lock_task_sighand(p)
1893 * must be used. Since p->mm != NULL and we hold ->mmap_sem
1894 * it can't fail.
1895 *
1896 * Note also that "g" can be the old leader with ->mm == NULL
1897 * and already unhashed and thus removed from ->thread_group.
 1898 * This is OK: __unhash_process()->list_del_rcu() does not
 1899 * clear the ->next pointer, so we will find the new leader via
1900 * next_thread().
1901 */
7b1c6154 1902 rcu_read_lock();
aceecc04 1903 for_each_process(g) {
5debfa6d
ON
1904 if (g == tsk->group_leader)
1905 continue;
15b9f360
ON
1906 if (g->flags & PF_KTHREAD)
1907 continue;
aceecc04
ON
1908 p = g;
1909 do {
1910 if (p->mm) {
15b9f360 1911 if (unlikely(p->mm == mm)) {
5debfa6d 1912 lock_task_sighand(p, &flags);
5c99cbf4 1913 nr += zap_process(p, exit_code);
5debfa6d
ON
1914 unlock_task_sighand(p, &flags);
1915 }
aceecc04
ON
1916 break;
1917 }
e4901f92 1918 } while_each_thread(g, p);
aceecc04 1919 }
7b1c6154 1920 rcu_read_unlock();
5debfa6d 1921done:
c5f1cc8c 1922 atomic_set(&core_state->nr_threads, nr);
8cd9c249 1923 return nr;
1da177e4
LT
1924}
1925
9d5b327b 1926static int coredump_wait(int exit_code, struct core_state *core_state)
1da177e4 1927{
dcf560c5
ON
1928 struct task_struct *tsk = current;
1929 struct mm_struct *mm = tsk->mm;
269b005a 1930 int core_waiters = -EBUSY;
1da177e4 1931
9d5b327b 1932 init_completion(&core_state->startup);
b564daf8
ON
1933 core_state->dumper.task = tsk;
1934 core_state->dumper.next = NULL;
269b005a
ON
1935
1936 down_write(&mm->mmap_sem);
1937 if (!mm->core_state)
1938 core_waiters = zap_threads(tsk, mm, core_state, exit_code);
2384f55f
ON
1939 up_write(&mm->mmap_sem);
1940
11aeca0b
SS
1941 if (core_waiters > 0) {
1942 struct core_thread *ptr;
1943
9d5b327b 1944 wait_for_completion(&core_state->startup);
11aeca0b
SS
1945 /*
1946 * Wait for all the threads to become inactive, so that
1947 * all the thread context (extended register state, like
 1948 * the FPU) gets copied to memory.
1949 */
1950 ptr = core_state->dumper.next;
1951 while (ptr != NULL) {
1952 wait_task_inactive(ptr->task, 0);
1953 ptr = ptr->next;
1954 }
1955 }
57b59c4a 1956
dcf560c5 1957 return core_waiters;
1da177e4
LT
1958}
1959
a94e2d40
ON
1960static void coredump_finish(struct mm_struct *mm)
1961{
1962 struct core_thread *curr, *next;
1963 struct task_struct *task;
1964
1965 next = mm->core_state->dumper.next;
1966 while ((curr = next) != NULL) {
1967 next = curr->next;
1968 task = curr->task;
1969 /*
1970 * see exit_mm(), curr->task must not see
1971 * ->task == NULL before we read ->next.
1972 */
1973 smp_mb();
1974 curr->task = NULL;
1975 wake_up_process(task);
1976 }
1977
1978 mm->core_state = NULL;
1979}
1980
6c5d5238
KH
1981/*
1982 * set_dumpable converts traditional three-value dumpable to two flags and
1983 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
1984 * these bits are not changed atomically. So get_dumpable can observe the
 1985 * intermediate state. To avoid unexpected behavior, get_dumpable returns
 1986 * either the old dumpable value or the new one by paying attention to the
 1987 * order in which the bits are modified.
1988 *
1989 * dumpable | mm->flags (binary)
1990 * old new | initial interim final
1991 * ---------+-----------------------
1992 * 0 1 | 00 01 01
1993 * 0 2 | 00 10(*) 11
1994 * 1 0 | 01 00 00
1995 * 1 2 | 01 11 11
1996 * 2 0 | 11 10(*) 00
1997 * 2 1 | 11 11 01
1998 *
1999 * (*) get_dumpable regards interim value of 10 as 11.
2000 */
2001void set_dumpable(struct mm_struct *mm, int value)
2002{
2003 switch (value) {
54b50199 2004 case SUID_DUMPABLE_DISABLED:
6c5d5238
KH
2005 clear_bit(MMF_DUMPABLE, &mm->flags);
2006 smp_wmb();
2007 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
2008 break;
54b50199 2009 case SUID_DUMPABLE_ENABLED:
6c5d5238
KH
2010 set_bit(MMF_DUMPABLE, &mm->flags);
2011 smp_wmb();
2012 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
2013 break;
54b50199 2014 case SUID_DUMPABLE_SAFE:
6c5d5238
KH
2015 set_bit(MMF_DUMP_SECURELY, &mm->flags);
2016 smp_wmb();
2017 set_bit(MMF_DUMPABLE, &mm->flags);
2018 break;
2019 }
2020}
6c5d5238 2021
30736a4d 2022static int __get_dumpable(unsigned long mm_flags)
6c5d5238
KH
2023{
2024 int ret;
2025
30736a4d 2026 ret = mm_flags & MMF_DUMPABLE_MASK;
54b50199 2027 return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
6c5d5238
KH
2028}
2029
30736a4d
MH
2030int get_dumpable(struct mm_struct *mm)
2031{
2032 return __get_dumpable(mm->flags);
2033}
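A userspace-only model of the table above may help; this is an illustration rather than kernel code, and it assumes MMF_DUMPABLE and MMF_DUMP_SECURELY occupy bits 0 and 1 as the comment implies.

/* Illustrative model of the dumpable <-> mm->flags encoding. */
#include <stdio.h>

#define DUMPABLE_BIT	0x1	/* models MMF_DUMPABLE (assumed bit 0) */
#define SECURELY_BIT	0x2	/* models MMF_DUMP_SECURELY (assumed bit 1) */

static int model_get_dumpable(unsigned long flags)
{
	int ret = flags & (DUMPABLE_BIT | SECURELY_BIT);

	/* The interim value 10 is regarded as 11, i.e. mode 2. */
	return (ret > 1) ? 2 : ret;
}

int main(void)
{
	unsigned long flags;

	for (flags = 0; flags < 4; flags++)
		printf("flags %lu%lu -> dumpable %d\n",
		       (flags >> 1) & 1, flags & 1, model_get_dumpable(flags));
	return 0;
}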
2034
61be228a
NH
2035static void wait_for_dump_helpers(struct file *file)
2036{
2037 struct pipe_inode_info *pipe;
2038
2039 pipe = file->f_path.dentry->d_inode->i_pipe;
2040
2041 pipe_lock(pipe);
2042 pipe->readers++;
2043 pipe->writers--;
2044
2045 while ((pipe->readers > 1) && (!signal_pending(current))) {
2046 wake_up_interruptible_sync(&pipe->wait);
2047 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
2048 pipe_wait(pipe);
2049 }
2050
2051 pipe->readers--;
2052 pipe->writers++;
2053 pipe_unlock(pipe);
2054
2055}
2056
2057
898b374a 2058/*
1bef8291 2059 * umh_pipe_setup
898b374a
NH
2060 * helper function to customize the process used
2061 * to collect the core in userspace. Specifically
2062 * it sets up a pipe and installs it as fd 0 (stdin)
 2063 * for the process. Returns 0 on success, or a
 2064 * negative error code on failure.
2065 * Note that it also sets the core limit to 1. This
2066 * is a special value that we use to trap recursive
2067 * core dumps
2068 */
87966996 2069static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
898b374a 2070{
e4fad8e5 2071 struct file *files[2];
898b374a
NH
2072 struct fdtable *fdt;
2073 struct coredump_params *cp = (struct coredump_params *)info->data;
2074 struct files_struct *cf = current->files;
e4fad8e5
AV
2075 int err = create_pipe_files(files, 0);
2076 if (err)
2077 return err;
898b374a 2078
e4fad8e5 2079 cp->file = files[1];
898b374a
NH
2080
2081 sys_close(0);
e4fad8e5 2082 fd_install(0, files[0]);
898b374a
NH
2083 spin_lock(&cf->file_lock);
2084 fdt = files_fdtable(cf);
1dce27c5
DH
2085 __set_open_fd(0, fdt);
2086 __clear_close_on_exec(0, fdt);
898b374a
NH
2087 spin_unlock(&cf->file_lock);
2088
2089 /* and disallow core files too */
2090 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
2091
2092 return 0;
2093}
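For orientation, a hedged sketch of the userspace counterpart that umh_pipe_setup() prepares for: a core_pattern pipe helper that reads the core image from fd 0 (the pipe read end installed above) and saves it. The helper path, output directory and the %p argument are illustrative assumptions, e.g. registered as "|/usr/local/sbin/core-helper %p".

/* Minimal pipe-helper sketch; names and paths are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	char path[256], buf[65536];
	ssize_t n;
	int out;

	/* %p in core_pattern expands to the dumping task's pid. */
	snprintf(path, sizeof(path), "/var/crash/core.%s",
		 argc > 1 ? argv[1] : "unknown");

	out = open(path, O_WRONLY | O_CREAT | O_EXCL, 0600);
	if (out < 0)
		return 1;

	/* The kernel feeds the core image on stdin (fd 0), as set up above. */
	while ((n = read(0, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			return 1;

	close(out);
	return 0;
}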
2094
8cd3ac3a 2095void do_coredump(long signr, int exit_code, struct pt_regs *regs)
1da177e4 2096{
9d5b327b 2097 struct core_state core_state;
1b0d300b 2098 struct core_name cn;
1da177e4
LT
2099 struct mm_struct *mm = current->mm;
2100 struct linux_binfmt * binfmt;
d84f4f99
DH
2101 const struct cred *old_cred;
2102 struct cred *cred;
1da177e4 2103 int retval = 0;
d6e71144 2104 int flag = 0;
d5bf4c4f 2105 int ispipe;
9520628e 2106 bool need_nonrelative = false;
a293980c 2107 static atomic_t core_dump_count = ATOMIC_INIT(0);
f6151dfe
MH
2108 struct coredump_params cprm = {
2109 .signr = signr,
2110 .regs = regs,
d554ed89 2111 .limit = rlimit(RLIMIT_CORE),
30736a4d
MH
2112 /*
2113 * We must use the same mm->flags while dumping core to avoid
 2114 * inconsistency of the bit flags, since these flags are not
 2115 * protected by any lock.
2116 */
2117 .mm_flags = mm->flags,
f6151dfe 2118 };
1da177e4 2119
0a4ff8c2
SG
2120 audit_core_dumps(signr);
2121
801460d0 2122 binfmt = mm->binfmt;
1da177e4
LT
2123 if (!binfmt || !binfmt->core_dump)
2124 goto fail;
269b005a
ON
2125 if (!__get_dumpable(cprm.mm_flags))
2126 goto fail;
d84f4f99
DH
2127
2128 cred = prepare_creds();
5e43aef5 2129 if (!cred)
d84f4f99 2130 goto fail;
d6e71144 2131 /*
9520628e
KC
2132 * We cannot trust fsuid as being the "true" uid of the process
2133 * nor do we know its entire history. We only know it was tainted
2134 * so we dump it as root in mode 2, and only into a controlled
2135 * environment (pipe handler or fully qualified path).
d6e71144 2136 */
54b50199 2137 if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
30736a4d 2138 /* Setuid core dump mode */
d6e71144 2139 flag = O_EXCL; /* Stop rewrite attacks */
8e96e3b7 2140 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
9520628e 2141 need_nonrelative = true;
d6e71144 2142 }
1291cf41 2143
9d5b327b 2144 retval = coredump_wait(exit_code, &core_state);
5e43aef5
ON
2145 if (retval < 0)
2146 goto fail_creds;
d84f4f99
DH
2147
2148 old_cred = override_creds(cred);
1da177e4
LT
2149
2150 /*
2151 * Clear any false indication of pending signals that might
2152 * be seen by the filesystem code called to write the core file.
2153 */
1da177e4
LT
2154 clear_thread_flag(TIF_SIGPENDING);
2155
1b0d300b
XF
2156 ispipe = format_corename(&cn, signr);
2157
c4bbafda 2158 if (ispipe) {
d5bf4c4f
ON
2159 int dump_count;
2160 char **helper_argv;
2161
99b64567
ON
2162 if (ispipe < 0) {
2163 printk(KERN_WARNING "format_corename failed\n");
2164 printk(KERN_WARNING "Aborting core\n");
2165 goto fail_corename;
2166 }
2167
898b374a 2168 if (cprm.limit == 1) {
108ceeb0
JZ
2169 /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
2170 *
725eae32
NH
2171 * Normally core limits are irrelevant to pipes, since
2172 * we're not writing to the file system, but we use
108ceeb0
JZ
 2173 * cprm.limit of 1 here as a special value; this is a
2174 * consistent way to catch recursive crashes.
2175 * We can still crash if the core_pattern binary sets
 2176 * RLIMIT_CORE != 1, but it runs as root, and can do
2177 * lots of stupid things.
2178 *
725eae32
NH
2179 * Note that we use task_tgid_vnr here to grab the pid
2180 * of the process group leader. That way we get the
2181 * right pid if a thread in a multi-threaded
2182 * core_pattern process dies.
2183 */
2184 printk(KERN_WARNING
898b374a 2185 "Process %d(%s) has RLIMIT_CORE set to 1\n",
725eae32
NH
2186 task_tgid_vnr(current), current->comm);
2187 printk(KERN_WARNING "Aborting core\n");
2188 goto fail_unlock;
2189 }
d5bf4c4f 2190 cprm.limit = RLIM_INFINITY;
725eae32 2191
a293980c
NH
2192 dump_count = atomic_inc_return(&core_dump_count);
2193 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
2194 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
2195 task_tgid_vnr(current), current->comm);
2196 printk(KERN_WARNING "Skipping core dump\n");
2197 goto fail_dropcount;
2198 }
2199
1b0d300b 2200 helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
350eaf79
TH
2201 if (!helper_argv) {
2202 printk(KERN_WARNING "%s failed to allocate memory\n",
2203 __func__);
a293980c 2204 goto fail_dropcount;
350eaf79 2205 }
32321137 2206
d5bf4c4f
ON
2207 retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
2208 NULL, UMH_WAIT_EXEC, umh_pipe_setup,
2209 NULL, &cprm);
2210 argv_free(helper_argv);
2211 if (retval) {
d025c9db 2212 printk(KERN_INFO "Core dump to %s pipe failed\n",
1b0d300b 2213 cn.corename);
d5bf4c4f 2214 goto close_fail;
d025c9db 2215 }
c7135411
ON
2216 } else {
2217 struct inode *inode;
2218
2219 if (cprm.limit < binfmt->min_coredump)
2220 goto fail_unlock;
2221
9520628e
KC
2222 if (need_nonrelative && cn.corename[0] != '/') {
2223 printk(KERN_WARNING "Pid %d(%s) can only dump core "\
2224 "to fully qualified path!\n",
2225 task_tgid_vnr(current), current->comm);
2226 printk(KERN_WARNING "Skipping core dump\n");
2227 goto fail_unlock;
2228 }
2229
1b0d300b 2230 cprm.file = filp_open(cn.corename,
6d4df677
AD
2231 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
2232 0600);
c7135411
ON
2233 if (IS_ERR(cprm.file))
2234 goto fail_unlock;
1da177e4 2235
c7135411
ON
2236 inode = cprm.file->f_path.dentry->d_inode;
2237 if (inode->i_nlink > 1)
2238 goto close_fail;
2239 if (d_unhashed(cprm.file->f_path.dentry))
2240 goto close_fail;
2241 /*
 2242 * AK: actually I see no reason not to allow this for named
 2243 * pipes etc., but keep the previous behaviour for now.
2244 */
2245 if (!S_ISREG(inode->i_mode))
2246 goto close_fail;
2247 /*
 2248 * Don't allow local users to get cute and trick others into
 2249 * dumping core into their pre-created files.
2250 */
8e96e3b7 2251 if (!uid_eq(inode->i_uid, current_fsuid()))
c7135411
ON
2252 goto close_fail;
2253 if (!cprm.file->f_op || !cprm.file->f_op->write)
2254 goto close_fail;
2255 if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
2256 goto close_fail;
2257 }
1da177e4 2258
c7135411 2259 retval = binfmt->core_dump(&cprm);
1da177e4
LT
2260 if (retval)
2261 current->signal->group_exit_code |= 0x80;
d5bf4c4f 2262
61be228a 2263 if (ispipe && core_pipe_limit)
f6151dfe 2264 wait_for_dump_helpers(cprm.file);
d5bf4c4f
ON
2265close_fail:
2266 if (cprm.file)
2267 filp_close(cprm.file, NULL);
a293980c 2268fail_dropcount:
d5bf4c4f 2269 if (ispipe)
a293980c 2270 atomic_dec(&core_dump_count);
1da177e4 2271fail_unlock:
1b0d300b
XF
2272 kfree(cn.corename);
2273fail_corename:
5e43aef5 2274 coredump_finish(mm);
d84f4f99 2275 revert_creds(old_cred);
5e43aef5 2276fail_creds:
d84f4f99 2277 put_cred(cred);
1da177e4 2278fail:
8cd3ac3a 2279 return;
1da177e4 2280}
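One hedged way to exercise this path from userspace is to raise the soft RLIMIT_CORE (which becomes cprm.limit above) and abort; whether a dump actually appears still depends on core_pattern, dumpability and the checks in do_coredump().

/* Illustrative trigger only. */
#include <stdlib.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	getrlimit(RLIMIT_CORE, &rl);
	rl.rlim_cur = rl.rlim_max;	/* raise soft limit to the hard limit */
	setrlimit(RLIMIT_CORE, &rl);

	abort();			/* SIGABRT is a core-dumping signal */
}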
3aa0ce82
LT
2281
2282/*
2283 * Core dumping helper functions. These are the only things you should
2284 * do on a core-file: use only these functions to write out all the
2285 * necessary info.
2286 */
2287int dump_write(struct file *file, const void *addr, int nr)
2288{
2289 return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
2290}
8fd01d6c 2291EXPORT_SYMBOL(dump_write);
3aa0ce82
LT
2292
2293int dump_seek(struct file *file, loff_t off)
2294{
2295 int ret = 1;
2296
2297 if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
2298 if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
2299 return 0;
2300 } else {
2301 char *buf = (char *)get_zeroed_page(GFP_KERNEL);
2302
2303 if (!buf)
2304 return 0;
2305 while (off > 0) {
2306 unsigned long n = off;
2307
2308 if (n > PAGE_SIZE)
2309 n = PAGE_SIZE;
2310 if (!dump_write(file, buf, n)) {
2311 ret = 0;
2312 break;
2313 }
2314 off -= n;
2315 }
2316 free_page((unsigned long)buf);
2317 }
2318 return ret;
2319}
8fd01d6c 2320EXPORT_SYMBOL(dump_seek);
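The fallback branch of dump_seek() pads with zero-filled pages when the target cannot seek; a rough userspace analogue of the same trade-off (a sparse hole via lseek() versus explicit zero writes), purely as an illustration:

/* Userspace analogue of the dump_seek() trade-off (illustration only). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Skip 'off' bytes: seek when the target supports it, else write zeros. */
static int skip_bytes(int fd, off_t off, int seekable)
{
	char buf[4096];

	if (seekable)
		return lseek(fd, off, SEEK_CUR) >= 0;

	memset(buf, 0, sizeof(buf));
	while (off > 0) {
		size_t n = off > (off_t)sizeof(buf) ? sizeof(buf) : (size_t)off;

		if (write(fd, buf, n) != (ssize_t)n)
			return 0;
		off -= n;
	}
	return 1;
}

int main(void)
{
	int fd = open("sparse.bin", O_WRONLY | O_CREAT | O_TRUNC, 0600);

	if (fd < 0)
		return 1;
	write(fd, "A", 1);
	skip_bytes(fd, 1 << 20, 1);	/* 1 MiB hole via lseek */
	write(fd, "Z", 1);
	close(fd);
	return 0;
}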
282124d1
AV
2321
2322#ifdef __ARCH_WANT_KERNEL_EXECVE
2323int kernel_execve(const char *filename,
2324 const char *const argv[],
2325 const char *const envp[])
2326{
2327 struct pt_regs *p = current_pt_regs();
2328 int ret;
2329
2330 ret = do_execve(filename,
2331 (const char __user *const __user *)argv,
2332 (const char __user *const __user *)envp, p);
2333 if (ret < 0)
2334 return ret;
2335
2336 /*
2337 * We were successful. We won't be returning to our caller, but
2338 * instead to user space by manipulating the kernel stack.
2339 */
2340 ret_from_kernel_execve(p);
2341}
2342#endif
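The comment in kernel_execve() relies on the usual execve contract, which is also visible from userspace: on success the call never returns to its caller. A small illustrative example:

/* Userspace illustration: execve() only returns on failure. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char *argv[] = { "/bin/echo", "replaced by execve", NULL };
	char *envp[] = { NULL };

	execve("/bin/echo", argv, envp);

	/* Reached only if execve failed; on success the process image
	 * is replaced and this line never runs. */
	perror("execve");
	return 1;
}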