1da177e4
LT
1/*
2 * linux/fs/exec.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * #!-checking implemented by tytso.
9 */
10/*
11 * Demand-loading implemented 01.12.91 - no need to read anything but
12 * the header into memory. The inode of the executable is put into
13 * "current->executable", and page faults do the actual loading. Clean.
14 *
15 * Once more I can proudly say that linux stood up to being changed: it
16 * was less than 2 hours work to get demand-loading completely implemented.
17 *
18 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
19 * current->executable is only used by the procfs. This allows a dispatch
20 * table to check for several different types of binary formats. We keep
21 * trying until we recognize the file or we run out of supported binary
22 * formats.
23 */
24
1da177e4
LT
25#include <linux/slab.h>
26#include <linux/file.h>
9f3acc31 27#include <linux/fdtable.h>
ba92a43d 28#include <linux/mm.h>
1da177e4
LT
29#include <linux/stat.h>
30#include <linux/fcntl.h>
31#include <linux/smp_lock.h>
ba92a43d 32#include <linux/swap.h>
74aadce9 33#include <linux/string.h>
1da177e4 34#include <linux/init.h>
1da177e4
LT
35#include <linux/highmem.h>
36#include <linux/spinlock.h>
37#include <linux/key.h>
38#include <linux/personality.h>
39#include <linux/binfmts.h>
1da177e4 40#include <linux/utsname.h>
84d73786 41#include <linux/pid_namespace.h>
1da177e4
LT
42#include <linux/module.h>
43#include <linux/namei.h>
44#include <linux/proc_fs.h>
1da177e4
LT
45#include <linux/mount.h>
46#include <linux/security.h>
47#include <linux/syscalls.h>
8f0ab514 48#include <linux/tsacct_kern.h>
9f46080c 49#include <linux/cn_proc.h>
473ae30b 50#include <linux/audit.h>
6341c393 51#include <linux/tracehook.h>
1da177e4
LT
52
53#include <asm/uaccess.h>
54#include <asm/mmu_context.h>
b6a2fea3 55#include <asm/tlb.h>
1da177e4
LT
56
57#ifdef CONFIG_KMOD
58#include <linux/kmod.h>
59#endif
60
702773b1
DW
61#ifdef __alpha__
62/* for /sbin/loader handling in search_binary_handler() */
63#include <linux/a.out.h>
64#endif
65
1da177e4 66int core_uses_pid;
71ce92f3 67char core_pattern[CORENAME_MAX_SIZE] = "core";
d6e71144
AC
68int suid_dumpable = 0;
69
1da177e4
LT
70/* The maximal length of core_pattern is also specified in sysctl.c */
71
e4dc1b14 72static LIST_HEAD(formats);
1da177e4
LT
73static DEFINE_RWLOCK(binfmt_lock);
74
75int register_binfmt(struct linux_binfmt * fmt)
76{
1da177e4
LT
77 if (!fmt)
78 return -EINVAL;
1da177e4 79 write_lock(&binfmt_lock);
e4dc1b14 80 list_add(&fmt->lh, &formats);
1da177e4
LT
81 write_unlock(&binfmt_lock);
82 return 0;
83}
84
85EXPORT_SYMBOL(register_binfmt);
86
f6b450d4 87void unregister_binfmt(struct linux_binfmt * fmt)
1da177e4 88{
1da177e4 89 write_lock(&binfmt_lock);
e4dc1b14 90 list_del(&fmt->lh);
1da177e4 91 write_unlock(&binfmt_lock);
1da177e4
LT
92}
93
94EXPORT_SYMBOL(unregister_binfmt);
95
96static inline void put_binfmt(struct linux_binfmt * fmt)
97{
98 module_put(fmt->module);
99}
100
101/*
102 * Note that a shared library must be both readable and executable for
103 * security reasons.
104 *
105 * Also note that we take the address to load from the file itself.
106 */
107asmlinkage long sys_uselib(const char __user * library)
108{
109 struct file * file;
110 struct nameidata nd;
111 int error;
112
b500531e 113 error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
1da177e4
LT
114 if (error)
115 goto out;
116
117 error = -EINVAL;
4ac91378 118 if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
1da177e4
LT
119 goto exit;
120
30524472
AV
121 error = -EACCES;
122 if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
123 goto exit;
124
b77b0646 125 error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN);
1da177e4
LT
126 if (error)
127 goto exit;
128
abe8be3a 129 file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
1da177e4
LT
130 error = PTR_ERR(file);
131 if (IS_ERR(file))
132 goto out;
133
134 error = -ENOEXEC;
135 if(file->f_op) {
136 struct linux_binfmt * fmt;
137
138 read_lock(&binfmt_lock);
e4dc1b14 139 list_for_each_entry(fmt, &formats, lh) {
1da177e4
LT
140 if (!fmt->load_shlib)
141 continue;
142 if (!try_module_get(fmt->module))
143 continue;
144 read_unlock(&binfmt_lock);
145 error = fmt->load_shlib(file);
146 read_lock(&binfmt_lock);
147 put_binfmt(fmt);
148 if (error != -ENOEXEC)
149 break;
150 }
151 read_unlock(&binfmt_lock);
152 }
153 fput(file);
154out:
155 return error;
156exit:
834f2a4a 157 release_open_intent(&nd);
1d957f9b 158 path_put(&nd.path);
1da177e4
LT
159 goto out;
160}
161
b6a2fea3
OW
162#ifdef CONFIG_MMU
163
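/*
 * MMU case: pin the page backing the argument area at @pos with
 * get_user_pages().  On the write path the stack is expanded first
 * (CONFIG_STACK_GROWSUP) and the total argv+env size is checked
 * against ARG_MAX and a quarter of RLIMIT_STACK below.
 */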
164static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
165 int write)
166{
167 struct page *page;
168 int ret;
169
170#ifdef CONFIG_STACK_GROWSUP
171 if (write) {
172 ret = expand_stack_downwards(bprm->vma, pos);
173 if (ret < 0)
174 return NULL;
175 }
176#endif
177 ret = get_user_pages(current, bprm->mm, pos,
178 1, write, 1, &page, NULL);
179 if (ret <= 0)
180 return NULL;
181
182 if (write) {
b6a2fea3 183 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
a64e715f
LT
184 struct rlimit *rlim;
185
186 /*
187 * We've historically supported up to 32 pages (ARG_MAX)
188 * of argument strings even with small stacks
189 */
190 if (size <= ARG_MAX)
191 return page;
b6a2fea3
OW
192
193 /*
194 * Limit to 1/4-th the stack size for the argv+env strings.
195 * This ensures that:
196 * - the remaining binfmt code will not run out of stack space,
197 * - the program will have a reasonable amount of stack left
198 * to work from.
199 */
a64e715f 200 rlim = current->signal->rlim;
b6a2fea3
OW
201 if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
202 put_page(page);
203 return NULL;
204 }
205 }
206
207 return page;
208}
209
210static void put_arg_page(struct page *page)
211{
212 put_page(page);
213}
214
215static void free_arg_page(struct linux_binprm *bprm, int i)
216{
217}
218
219static void free_arg_pages(struct linux_binprm *bprm)
220{
221}
222
223static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
224 struct page *page)
225{
226 flush_cache_page(bprm->vma, pos, page_to_pfn(page));
227}
228
229static int __bprm_mm_init(struct linux_binprm *bprm)
230{
231 int err = -ENOMEM;
232 struct vm_area_struct *vma = NULL;
233 struct mm_struct *mm = bprm->mm;
234
235 bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
236 if (!vma)
237 goto err;
238
239 down_write(&mm->mmap_sem);
240 vma->vm_mm = mm;
241
242 /*
243 * Place the stack at the largest stack address the architecture
244 * supports. Later, we'll move this to an appropriate place. We don't
245 * use STACK_TOP because that can depend on attributes which aren't
246 * configured yet.
247 */
248 vma->vm_end = STACK_TOP_MAX;
249 vma->vm_start = vma->vm_end - PAGE_SIZE;
250
251 vma->vm_flags = VM_STACK_FLAGS;
3ed75eb8 252 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
b6a2fea3
OW
253 err = insert_vm_struct(mm, vma);
254 if (err) {
255 up_write(&mm->mmap_sem);
256 goto err;
257 }
258
259 mm->stack_vm = mm->total_vm = 1;
260 up_write(&mm->mmap_sem);
261
262 bprm->p = vma->vm_end - sizeof(void *);
263
264 return 0;
265
266err:
267 if (vma) {
268 bprm->vma = NULL;
269 kmem_cache_free(vm_area_cachep, vma);
270 }
271
272 return err;
273}
274
275static bool valid_arg_len(struct linux_binprm *bprm, long len)
276{
277 return len <= MAX_ARG_STRLEN;
278}
279
280#else
281
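/*
 * No-MMU case: argument pages live in the bprm->page[] array instead of
 * a real stack vma; allocate a zeroed highmem page on demand when writing.
 */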
282static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
283 int write)
284{
285 struct page *page;
286
287 page = bprm->page[pos / PAGE_SIZE];
288 if (!page && write) {
289 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
290 if (!page)
291 return NULL;
292 bprm->page[pos / PAGE_SIZE] = page;
293 }
294
295 return page;
296}
297
298static void put_arg_page(struct page *page)
299{
300}
301
302static void free_arg_page(struct linux_binprm *bprm, int i)
303{
304 if (bprm->page[i]) {
305 __free_page(bprm->page[i]);
306 bprm->page[i] = NULL;
307 }
308}
309
310static void free_arg_pages(struct linux_binprm *bprm)
311{
312 int i;
313
314 for (i = 0; i < MAX_ARG_PAGES; i++)
315 free_arg_page(bprm, i);
316}
317
318static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
319 struct page *page)
320{
321}
322
323static int __bprm_mm_init(struct linux_binprm *bprm)
324{
325 bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
326 return 0;
327}
328
329static bool valid_arg_len(struct linux_binprm *bprm, long len)
330{
331 return len <= bprm->p;
332}
333
334#endif /* CONFIG_MMU */
335
336/*
337 * Create a new mm_struct and populate it with a temporary stack
338 * vm_area_struct. We don't have enough context at this point to set the stack
339 * flags, permissions, and offset, so we use temporary values. We'll update
340 * them later in setup_arg_pages().
341 */
342int bprm_mm_init(struct linux_binprm *bprm)
343{
344 int err;
345 struct mm_struct *mm = NULL;
346
347 bprm->mm = mm = mm_alloc();
348 err = -ENOMEM;
349 if (!mm)
350 goto err;
351
352 err = init_new_context(current, mm);
353 if (err)
354 goto err;
355
356 err = __bprm_mm_init(bprm);
357 if (err)
358 goto err;
359
360 return 0;
361
362err:
363 if (mm) {
364 bprm->mm = NULL;
365 mmdrop(mm);
366 }
367
368 return err;
369}
370
1da177e4
LT
371/*
372 * count() counts the number of strings in array ARGV.
373 */
374static int count(char __user * __user * argv, int max)
375{
376 int i = 0;
377
378 if (argv != NULL) {
379 for (;;) {
380 char __user * p;
381
382 if (get_user(p, argv))
383 return -EFAULT;
384 if (!p)
385 break;
386 argv++;
387 if(++i > max)
388 return -E2BIG;
389 cond_resched();
390 }
391 }
392 return i;
393}
394
395/*
b6a2fea3
OW
396 * 'copy_strings()' copies argument/environment strings from the old
397 * process's memory to the new process's stack. The call to get_user_pages()
398 * ensures the destination page is created and not swapped out.
1da177e4 399 */
75c96f85
AB
400static int copy_strings(int argc, char __user * __user * argv,
401 struct linux_binprm *bprm)
1da177e4
LT
402{
403 struct page *kmapped_page = NULL;
404 char *kaddr = NULL;
b6a2fea3 405 unsigned long kpos = 0;
1da177e4
LT
406 int ret;
407
408 while (argc-- > 0) {
409 char __user *str;
410 int len;
411 unsigned long pos;
412
413 if (get_user(str, argv+argc) ||
b6a2fea3 414 !(len = strnlen_user(str, MAX_ARG_STRLEN))) {
1da177e4
LT
415 ret = -EFAULT;
416 goto out;
417 }
418
b6a2fea3 419 if (!valid_arg_len(bprm, len)) {
1da177e4
LT
420 ret = -E2BIG;
421 goto out;
422 }
423
b6a2fea3 424 /* We're going to work our way backwards. */
1da177e4 425 pos = bprm->p;
b6a2fea3
OW
426 str += len;
427 bprm->p -= len;
1da177e4
LT
428
429 while (len > 0) {
1da177e4 430 int offset, bytes_to_copy;
1da177e4
LT
431
432 offset = pos % PAGE_SIZE;
b6a2fea3
OW
433 if (offset == 0)
434 offset = PAGE_SIZE;
435
436 bytes_to_copy = offset;
437 if (bytes_to_copy > len)
438 bytes_to_copy = len;
439
440 offset -= bytes_to_copy;
441 pos -= bytes_to_copy;
442 str -= bytes_to_copy;
443 len -= bytes_to_copy;
444
445 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
446 struct page *page;
447
448 page = get_arg_page(bprm, pos, 1);
1da177e4 449 if (!page) {
b6a2fea3 450 ret = -E2BIG;
1da177e4
LT
451 goto out;
452 }
1da177e4 453
b6a2fea3
OW
454 if (kmapped_page) {
455 flush_kernel_dcache_page(kmapped_page);
1da177e4 456 kunmap(kmapped_page);
b6a2fea3
OW
457 put_arg_page(kmapped_page);
458 }
1da177e4
LT
459 kmapped_page = page;
460 kaddr = kmap(kmapped_page);
b6a2fea3
OW
461 kpos = pos & PAGE_MASK;
462 flush_arg_page(bprm, kpos, kmapped_page);
1da177e4 463 }
b6a2fea3 464 if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
1da177e4
LT
465 ret = -EFAULT;
466 goto out;
467 }
1da177e4
LT
468 }
469 }
470 ret = 0;
471out:
b6a2fea3
OW
472 if (kmapped_page) {
473 flush_kernel_dcache_page(kmapped_page);
1da177e4 474 kunmap(kmapped_page);
b6a2fea3
OW
475 put_arg_page(kmapped_page);
476 }
1da177e4
LT
477 return ret;
478}
479
480/*
481 * Like copy_strings, but get argv and its values from kernel memory.
482 */
483int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
484{
485 int r;
486 mm_segment_t oldfs = get_fs();
487 set_fs(KERNEL_DS);
488 r = copy_strings(argc, (char __user * __user *)argv, bprm);
489 set_fs(oldfs);
490 return r;
491}
1da177e4
LT
492EXPORT_SYMBOL(copy_strings_kernel);
493
494#ifdef CONFIG_MMU
b6a2fea3 495
1da177e4 496/*
b6a2fea3
OW
497 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
498 * the binfmt code determines where the new stack should reside, we shift it to
499 * its final location. The process proceeds as follows:
1da177e4 500 *
b6a2fea3
OW
501 * 1) Use shift to calculate the new vma endpoints.
502 * 2) Extend vma to cover both the old and new ranges. This ensures the
503 * arguments passed to subsequent functions are consistent.
504 * 3) Move vma's page tables to the new range.
505 * 4) Free up any cleared pgd range.
506 * 5) Shrink the vma to cover only the new range.
1da177e4 507 */
b6a2fea3 508static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
1da177e4
LT
509{
510 struct mm_struct *mm = vma->vm_mm;
b6a2fea3
OW
511 unsigned long old_start = vma->vm_start;
512 unsigned long old_end = vma->vm_end;
513 unsigned long length = old_end - old_start;
514 unsigned long new_start = old_start - shift;
515 unsigned long new_end = old_end - shift;
516 struct mmu_gather *tlb;
1da177e4 517
b6a2fea3 518 BUG_ON(new_start > new_end);
1da177e4 519
b6a2fea3
OW
520 /*
521 * ensure there are no vmas between where we want to go
522 * and where we are
523 */
524 if (vma != find_vma(mm, new_start))
525 return -EFAULT;
526
527 /*
528 * cover the whole range: [new_start, old_end)
529 */
530 vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);
531
532 /*
533 * move the page tables downwards; on failure we rely on
534 * process cleanup to remove whatever mess we made.
535 */
536 if (length != move_page_tables(vma, old_start,
537 vma, new_start, length))
538 return -ENOMEM;
539
540 lru_add_drain();
541 tlb = tlb_gather_mmu(mm, 0);
542 if (new_end > old_start) {
543 /*
544 * when the old and new regions overlap clear from new_end.
545 */
42b77728 546 free_pgd_range(tlb, new_end, old_end, new_end,
b6a2fea3
OW
547 vma->vm_next ? vma->vm_next->vm_start : 0);
548 } else {
549 /*
550 * otherwise, clean from old_start; this is done to avoid touching
551 * the address space in [new_end, old_start), since some architectures
552 * have constraints on va-space that make this illegal (IA64) -
553 * for the others it's just a little faster.
554 */
42b77728 555 free_pgd_range(tlb, old_start, old_end, new_end,
b6a2fea3 556 vma->vm_next ? vma->vm_next->vm_start : 0);
1da177e4 557 }
b6a2fea3
OW
558 tlb_finish_mmu(tlb, new_end, old_end);
559
560 /*
561 * shrink the vma to just the new range.
562 */
563 vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
564
565 return 0;
1da177e4
LT
566}
567
568#define EXTRA_STACK_VM_PAGES 20 /* random */
569
b6a2fea3
OW
570/*
571 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
572 * the stack is optionally relocated, and some extra space is added.
573 */
1da177e4
LT
574int setup_arg_pages(struct linux_binprm *bprm,
575 unsigned long stack_top,
576 int executable_stack)
577{
b6a2fea3
OW
578 unsigned long ret;
579 unsigned long stack_shift;
1da177e4 580 struct mm_struct *mm = current->mm;
b6a2fea3
OW
581 struct vm_area_struct *vma = bprm->vma;
582 struct vm_area_struct *prev = NULL;
583 unsigned long vm_flags;
584 unsigned long stack_base;
1da177e4
LT
585
586#ifdef CONFIG_STACK_GROWSUP
1da177e4
LT
587 /* Limit stack size to 1GB */
588 stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
589 if (stack_base > (1 << 30))
590 stack_base = 1 << 30;
1da177e4 591
b6a2fea3
OW
592 /* Make sure we didn't let the argument array grow too large. */
593 if (vma->vm_end - vma->vm_start > stack_base)
594 return -ENOMEM;
1da177e4 595
b6a2fea3 596 stack_base = PAGE_ALIGN(stack_top - stack_base);
1da177e4 597
b6a2fea3
OW
598 stack_shift = vma->vm_start - stack_base;
599 mm->arg_start = bprm->p - stack_shift;
600 bprm->p = vma->vm_end - stack_shift;
1da177e4 601#else
b6a2fea3
OW
602 stack_top = arch_align_stack(stack_top);
603 stack_top = PAGE_ALIGN(stack_top);
604 stack_shift = vma->vm_end - stack_top;
605
606 bprm->p -= stack_shift;
1da177e4 607 mm->arg_start = bprm->p;
1da177e4
LT
608#endif
609
1da177e4 610 if (bprm->loader)
b6a2fea3
OW
611 bprm->loader -= stack_shift;
612 bprm->exec -= stack_shift;
1da177e4 613
1da177e4 614 down_write(&mm->mmap_sem);
96a8e13e 615 vm_flags = VM_STACK_FLAGS;
b6a2fea3
OW
616
617 /*
618 * Adjust stack execute permissions; explicitly enable for
619 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
620 * (arch default) otherwise.
621 */
622 if (unlikely(executable_stack == EXSTACK_ENABLE_X))
623 vm_flags |= VM_EXEC;
624 else if (executable_stack == EXSTACK_DISABLE_X)
625 vm_flags &= ~VM_EXEC;
626 vm_flags |= mm->def_flags;
627
628 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
629 vm_flags);
630 if (ret)
631 goto out_unlock;
632 BUG_ON(prev != vma);
633
634 /* Move stack pages down in memory. */
635 if (stack_shift) {
636 ret = shift_arg_pages(vma, stack_shift);
637 if (ret) {
1da177e4 638 up_write(&mm->mmap_sem);
1da177e4
LT
639 return ret;
640 }
1da177e4
LT
641 }
642
b6a2fea3
OW
643#ifdef CONFIG_STACK_GROWSUP
644 stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
645#else
646 stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
647#endif
648 ret = expand_stack(vma, stack_base);
649 if (ret)
650 ret = -EFAULT;
651
652out_unlock:
1da177e4 653 up_write(&mm->mmap_sem);
1da177e4
LT
654 return 0;
655}
1da177e4
LT
656EXPORT_SYMBOL(setup_arg_pages);
657
1da177e4
LT
658#endif /* CONFIG_MMU */
659
660struct file *open_exec(const char *name)
661{
662 struct nameidata nd;
1da177e4 663 struct file *file;
e56b6a5d 664 int err;
1da177e4 665
e56b6a5d
CH
666 err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
667 FMODE_READ|FMODE_EXEC);
668 if (err)
669 goto out;
670
671 err = -EACCES;
672 if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
673 goto out_path_put;
674
30524472
AV
675 if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
676 goto out_path_put;
677
e56b6a5d
CH
678 err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN);
679 if (err)
680 goto out_path_put;
681
682 file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
683 if (IS_ERR(file))
684 return file;
685
686 err = deny_write_access(file);
687 if (err) {
688 fput(file);
689 goto out;
1da177e4 690 }
1da177e4 691
e56b6a5d
CH
692 return file;
693
694 out_path_put:
695 release_open_intent(&nd);
696 path_put(&nd.path);
697 out:
698 return ERR_PTR(err);
699}
1da177e4
LT
700EXPORT_SYMBOL(open_exec);
701
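/*
 * Read @count bytes at @offset from @file into the kernel buffer @addr.
 * Temporarily switches the address limit with set_fs() so that vfs_read()
 * accepts a kernel pointer.
 */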
702int kernel_read(struct file *file, unsigned long offset,
703 char *addr, unsigned long count)
704{
705 mm_segment_t old_fs;
706 loff_t pos = offset;
707 int result;
708
709 old_fs = get_fs();
710 set_fs(get_ds());
711 /* The cast to a user pointer is valid due to the set_fs() */
712 result = vfs_read(file, (void __user *)addr, count, &pos);
713 set_fs(old_fs);
714 return result;
715}
716
717EXPORT_SYMBOL(kernel_read);
718
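/*
 * Install the mm built up by bprm_mm_init() as the current task's address
 * space.  Bails out with -EINTR if a core dump of the old mm is in
 * progress, otherwise swaps ->mm/->active_mm, activates the new mm and
 * drops the reference on the old one.
 */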
719static int exec_mmap(struct mm_struct *mm)
720{
721 struct task_struct *tsk;
722 struct mm_struct * old_mm, *active_mm;
723
724 /* Notify parent that we're no longer interested in the old VM */
725 tsk = current;
726 old_mm = current->mm;
727 mm_release(tsk, old_mm);
728
729 if (old_mm) {
730 /*
731 * Make sure that if there is a core dump in progress
732 * for the old mm, we get out and die instead of going
733 * through with the exec. We must hold mmap_sem around
999d9fc1 734 * checking core_state and changing tsk->mm.
1da177e4
LT
735 */
736 down_read(&old_mm->mmap_sem);
999d9fc1 737 if (unlikely(old_mm->core_state)) {
1da177e4
LT
738 up_read(&old_mm->mmap_sem);
739 return -EINTR;
740 }
741 }
742 task_lock(tsk);
743 active_mm = tsk->active_mm;
744 tsk->mm = mm;
745 tsk->active_mm = mm;
746 activate_mm(active_mm, mm);
747 task_unlock(tsk);
4cd1a8fc 748 mm_update_next_owner(old_mm);
1da177e4
LT
749 arch_pick_mmap_layout(mm);
750 if (old_mm) {
751 up_read(&old_mm->mmap_sem);
7dddb12c 752 BUG_ON(active_mm != old_mm);
1da177e4
LT
753 mmput(old_mm);
754 return 0;
755 }
756 mmdrop(active_mm);
757 return 0;
758}
759
760/*
761 * This function makes sure the current process has its own signal table,
762 * so that flush_signal_handlers can later reset the handlers without
763 * disturbing other processes. (Other processes might share the signal
764 * table via the CLONE_SIGHAND option to clone().)
765 */
858119e1 766static int de_thread(struct task_struct *tsk)
1da177e4
LT
767{
768 struct signal_struct *sig = tsk->signal;
b2c903b8 769 struct sighand_struct *oldsighand = tsk->sighand;
1da177e4 770 spinlock_t *lock = &oldsighand->siglock;
329f7dba 771 struct task_struct *leader = NULL;
1da177e4
LT
772 int count;
773
aafe6c2a 774 if (thread_group_empty(tsk))
1da177e4
LT
775 goto no_thread_group;
776
777 /*
778 * Kill all other threads in the thread group.
1da177e4 779 */
1da177e4 780 spin_lock_irq(lock);
ed5d2cac 781 if (signal_group_exit(sig)) {
1da177e4
LT
782 /*
783 * Another group action in progress, just
784 * return so that the signal is processed.
785 */
786 spin_unlock_irq(lock);
1da177e4
LT
787 return -EAGAIN;
788 }
ed5d2cac 789 sig->group_exit_task = tsk;
aafe6c2a 790 zap_other_threads(tsk);
1da177e4 791
fea9d175
ON
792 /* Account for the thread group leader hanging around: */
793 count = thread_group_leader(tsk) ? 1 : 2;
6db840fa 794 sig->notify_count = count;
1da177e4 795 while (atomic_read(&sig->count) > count) {
1da177e4
LT
796 __set_current_state(TASK_UNINTERRUPTIBLE);
797 spin_unlock_irq(lock);
798 schedule();
799 spin_lock_irq(lock);
800 }
1da177e4
LT
801 spin_unlock_irq(lock);
802
803 /*
804 * At this point all other threads have exited, all we have to
805 * do is to wait for the thread group leader to become inactive,
806 * and to assume its PID:
807 */
aafe6c2a 808 if (!thread_group_leader(tsk)) {
aafe6c2a 809 leader = tsk->group_leader;
6db840fa 810
2800d8d1 811 sig->notify_count = -1; /* for exit_notify() */
6db840fa
ON
812 for (;;) {
813 write_lock_irq(&tasklist_lock);
814 if (likely(leader->exit_state))
815 break;
816 __set_current_state(TASK_UNINTERRUPTIBLE);
817 write_unlock_irq(&tasklist_lock);
818 schedule();
819 }
1da177e4 820
7a5e873f
ON
821 if (unlikely(task_child_reaper(tsk) == leader))
822 task_active_pid_ns(tsk)->child_reaper = tsk;
f5e90281
RM
823 /*
824 * The only record we have of the real-time age of a
825 * process, regardless of execs it's done, is start_time.
826 * All the past CPU time is accumulated in signal_struct
827 * from sister threads now dead. But in this non-leader
828 * exec, nothing survives from the original leader thread,
829 * whose birth marks the true age of this process now.
830 * When we take on its identity by switching to its PID, we
831 * also take its birthdate (always earlier than our own).
832 */
aafe6c2a 833 tsk->start_time = leader->start_time;
f5e90281 834
bac0abd6
PE
835 BUG_ON(!same_thread_group(leader, tsk));
836 BUG_ON(has_group_leader_pid(tsk));
1da177e4
LT
837 /*
838 * An exec() starts a new thread group with the
839 * TGID of the previous thread group. Rehash the
840 * two threads with a switched PID, and release
841 * the former thread group leader:
842 */
d73d6529
EB
843
844 /* Become a process group leader with the old leader's pid.
c18258c6
EB
845 * The old leader becomes a thread of this thread group.
846 * Note: The old leader also uses this pid until release_task
d73d6529
EB
847 * is called. Odd but simple and correct.
848 */
aafe6c2a
EB
849 detach_pid(tsk, PIDTYPE_PID);
850 tsk->pid = leader->pid;
3743ca05 851 attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
aafe6c2a
EB
852 transfer_pid(leader, tsk, PIDTYPE_PGID);
853 transfer_pid(leader, tsk, PIDTYPE_SID);
854 list_replace_rcu(&leader->tasks, &tsk->tasks);
1da177e4 855
aafe6c2a
EB
856 tsk->group_leader = tsk;
857 leader->group_leader = tsk;
de12a787 858
aafe6c2a 859 tsk->exit_signal = SIGCHLD;
962b564c
ON
860
861 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
862 leader->exit_state = EXIT_DEAD;
1da177e4
LT
863
864 write_unlock_irq(&tasklist_lock);
ed5d2cac 865 }
1da177e4 866
6db840fa
ON
867 sig->group_exit_task = NULL;
868 sig->notify_count = 0;
1da177e4
LT
869
870no_thread_group:
1da177e4 871 exit_itimers(sig);
cbaffba1 872 flush_itimer_signals();
329f7dba
ON
873 if (leader)
874 release_task(leader);
875
b2c903b8
ON
876 if (atomic_read(&oldsighand->count) != 1) {
877 struct sighand_struct *newsighand;
1da177e4 878 /*
b2c903b8
ON
879 * This ->sighand is shared with the CLONE_SIGHAND
880 * but not CLONE_THREAD task, switch to the new one.
1da177e4 881 */
b2c903b8
ON
882 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
883 if (!newsighand)
884 return -ENOMEM;
885
1da177e4
LT
886 atomic_set(&newsighand->count, 1);
887 memcpy(newsighand->action, oldsighand->action,
888 sizeof(newsighand->action));
889
890 write_lock_irq(&tasklist_lock);
891 spin_lock(&oldsighand->siglock);
aafe6c2a 892 rcu_assign_pointer(tsk->sighand, newsighand);
1da177e4
LT
893 spin_unlock(&oldsighand->siglock);
894 write_unlock_irq(&tasklist_lock);
895
fba2afaa 896 __cleanup_sighand(oldsighand);
1da177e4
LT
897 }
898
aafe6c2a 899 BUG_ON(!thread_group_leader(tsk));
1da177e4
LT
900 return 0;
901}
0840a90d 902
1da177e4
LT
903/*
904 * These functions flush out all traces of the currently running executable
905 * so that a new one can be started
906 */
858119e1 907static void flush_old_files(struct files_struct * files)
1da177e4
LT
908{
909 long j = -1;
badf1662 910 struct fdtable *fdt;
1da177e4
LT
911
912 spin_lock(&files->file_lock);
913 for (;;) {
914 unsigned long set, i;
915
916 j++;
917 i = j * __NFDBITS;
badf1662 918 fdt = files_fdtable(files);
bbea9f69 919 if (i >= fdt->max_fds)
1da177e4 920 break;
badf1662 921 set = fdt->close_on_exec->fds_bits[j];
1da177e4
LT
922 if (!set)
923 continue;
badf1662 924 fdt->close_on_exec->fds_bits[j] = 0;
1da177e4
LT
925 spin_unlock(&files->file_lock);
926 for ( ; set ; i++,set >>= 1) {
927 if (set & 1) {
928 sys_close(i);
929 }
930 }
931 spin_lock(&files->file_lock);
932
933 }
934 spin_unlock(&files->file_lock);
935}
936
59714d65 937char *get_task_comm(char *buf, struct task_struct *tsk)
1da177e4
LT
938{
939 /* buf must be at least sizeof(tsk->comm) in size */
940 task_lock(tsk);
941 strncpy(buf, tsk->comm, sizeof(tsk->comm));
942 task_unlock(tsk);
59714d65 943 return buf;
1da177e4
LT
944}
945
946void set_task_comm(struct task_struct *tsk, char *buf)
947{
948 task_lock(tsk);
949 strlcpy(tsk->comm, buf, sizeof(tsk->comm));
950 task_unlock(tsk);
951}
952
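/*
 * Flush the old executable's state: leave the old thread group via
 * de_thread(), switch to the new mm, reset the command name, dumpable
 * state and signal handlers, and close all close-on-exec files.
 */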
953int flush_old_exec(struct linux_binprm * bprm)
954{
955 char * name;
956 int i, ch, retval;
1da177e4
LT
957 char tcomm[sizeof(current->comm)];
958
959 /*
960 * Make sure we have a private signal table and that
961 * we are unassociated from the previous thread group.
962 */
963 retval = de_thread(current);
964 if (retval)
965 goto out;
966
925d1c40
MH
967 set_mm_exe_file(bprm->mm, bprm->file);
968
1da177e4
LT
969 /*
970 * Release all of the old mmap stuff
971 */
972 retval = exec_mmap(bprm->mm);
973 if (retval)
fd8328be 974 goto out;
1da177e4
LT
975
976 bprm->mm = NULL; /* We're using it now */
977
978 /* This is the point of no return */
1da177e4
LT
979 current->sas_ss_sp = current->sas_ss_size = 0;
980
981 if (current->euid == current->uid && current->egid == current->gid)
6c5d5238 982 set_dumpable(current->mm, 1);
d6e71144 983 else
6c5d5238 984 set_dumpable(current->mm, suid_dumpable);
d6e71144 985
1da177e4 986 name = bprm->filename;
36772092
PBG
987
988 /* Copy the binary name from after the last slash */
1da177e4
LT
989 for (i=0; (ch = *(name++)) != '\0';) {
990 if (ch == '/')
36772092 991 i = 0; /* overwrite what we wrote */
1da177e4
LT
992 else
993 if (i < (sizeof(tcomm) - 1))
994 tcomm[i++] = ch;
995 }
996 tcomm[i] = '\0';
997 set_task_comm(current, tcomm);
998
999 current->flags &= ~PF_RANDOMIZE;
1000 flush_thread();
1001
0551fbd2
BH
1002 /* Set the new mm task size. We have to do that late because it may
1003 * depend on TIF_32BIT which is only updated in flush_thread() on
1004 * some architectures like powerpc
1005 */
1006 current->mm->task_size = TASK_SIZE;
1007
d2d56c5f
MH
1008 if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
1009 suid_keys(current);
1010 set_dumpable(current->mm, suid_dumpable);
1011 current->pdeath_signal = 0;
1012 } else if (file_permission(bprm->file, MAY_READ) ||
1013 (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
1da177e4 1014 suid_keys(current);
6c5d5238 1015 set_dumpable(current->mm, suid_dumpable);
1da177e4
LT
1016 }
1017
1018 /* An exec changes our domain. We are no longer part of the thread
1019 group */
1020
1021 current->self_exec_id++;
1022
1023 flush_signal_handlers(current, 0);
1024 flush_old_files(current->files);
1025
1026 return 0;
1027
1da177e4
LT
1028out:
1029 return retval;
1030}
1031
1032EXPORT_SYMBOL(flush_old_exec);
1033
1034/*
1035 * Fill the binprm structure from the inode.
1036 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1037 */
1038int prepare_binprm(struct linux_binprm *bprm)
1039{
1040 int mode;
0f7fc9e4 1041 struct inode * inode = bprm->file->f_path.dentry->d_inode;
1da177e4
LT
1042 int retval;
1043
1044 mode = inode->i_mode;
1da177e4
LT
1045 if (bprm->file->f_op == NULL)
1046 return -EACCES;
1047
1048 bprm->e_uid = current->euid;
1049 bprm->e_gid = current->egid;
1050
0f7fc9e4 1051 if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
1da177e4
LT
1052 /* Set-uid? */
1053 if (mode & S_ISUID) {
1054 current->personality &= ~PER_CLEAR_ON_SETID;
1055 bprm->e_uid = inode->i_uid;
1056 }
1057
1058 /* Set-gid? */
1059 /*
1060 * If setgid is set but no group execute bit then this
1061 * is a candidate for mandatory locking, not a setgid
1062 * executable.
1063 */
1064 if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1065 current->personality &= ~PER_CLEAR_ON_SETID;
1066 bprm->e_gid = inode->i_gid;
1067 }
1068 }
1069
1070 /* fill in binprm security blob */
1071 retval = security_bprm_set(bprm);
1072 if (retval)
1073 return retval;
1074
1075 memset(bprm->buf,0,BINPRM_BUF_SIZE);
1076 return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
1077}
1078
1079EXPORT_SYMBOL(prepare_binprm);
1080
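/*
 * Collect the LSM_UNSAFE_* flags used when computing credentials for this
 * exec: tracing state reported by tracehook_unsafe_exec() plus
 * LSM_UNSAFE_SHARE when fs, files or sighand are still shared with
 * another task.
 */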
858119e1 1081static int unsafe_exec(struct task_struct *p)
1da177e4 1082{
6341c393
RM
1083 int unsafe = tracehook_unsafe_exec(p);
1084
1da177e4
LT
1085 if (atomic_read(&p->fs->count) > 1 ||
1086 atomic_read(&p->files->count) > 1 ||
1087 atomic_read(&p->sighand->count) > 1)
1088 unsafe |= LSM_UNSAFE_SHARE;
1089
1090 return unsafe;
1091}
1092
1093void compute_creds(struct linux_binprm *bprm)
1094{
1095 int unsafe;
1096
d2d56c5f 1097 if (bprm->e_uid != current->uid) {
1da177e4 1098 suid_keys(current);
d2d56c5f
MH
1099 current->pdeath_signal = 0;
1100 }
1da177e4
LT
1101 exec_keys(current);
1102
1103 task_lock(current);
1104 unsafe = unsafe_exec(current);
1105 security_bprm_apply_creds(bprm, unsafe);
1106 task_unlock(current);
1107 security_bprm_post_apply_creds(bprm);
1108}
1da177e4
LT
1109EXPORT_SYMBOL(compute_creds);
1110
4fc75ff4
NP
1111/*
1112 * Arguments are '\0' separated strings found at the location bprm->p
1113 * points to; chop off the first by relocating bprm->p to right after
1114 * the first '\0' encountered.
1115 */
b6a2fea3 1116int remove_arg_zero(struct linux_binprm *bprm)
1da177e4 1117{
b6a2fea3
OW
1118 int ret = 0;
1119 unsigned long offset;
1120 char *kaddr;
1121 struct page *page;
4fc75ff4 1122
b6a2fea3
OW
1123 if (!bprm->argc)
1124 return 0;
1da177e4 1125
b6a2fea3
OW
1126 do {
1127 offset = bprm->p & ~PAGE_MASK;
1128 page = get_arg_page(bprm, bprm->p, 0);
1129 if (!page) {
1130 ret = -EFAULT;
1131 goto out;
1132 }
1133 kaddr = kmap_atomic(page, KM_USER0);
4fc75ff4 1134
b6a2fea3
OW
1135 for (; offset < PAGE_SIZE && kaddr[offset];
1136 offset++, bprm->p++)
1137 ;
4fc75ff4 1138
b6a2fea3
OW
1139 kunmap_atomic(kaddr, KM_USER0);
1140 put_arg_page(page);
4fc75ff4 1141
b6a2fea3
OW
1142 if (offset == PAGE_SIZE)
1143 free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
1144 } while (offset == PAGE_SIZE);
4fc75ff4 1145
b6a2fea3
OW
1146 bprm->p++;
1147 bprm->argc--;
1148 ret = 0;
4fc75ff4 1149
b6a2fea3
OW
1150out:
1151 return ret;
1da177e4 1152}
1da177e4
LT
1153EXPORT_SYMBOL(remove_arg_zero);
1154
1155/*
1156 * cycle through the list of binary format handlers until one recognizes the image
1157 */
1158int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1159{
1160 int try,retval;
1161 struct linux_binfmt *fmt;
702773b1 1162#ifdef __alpha__
1da177e4
LT
1163 /* handle /sbin/loader.. */
1164 {
1165 struct exec * eh = (struct exec *) bprm->buf;
1166
1167 if (!bprm->loader && eh->fh.f_magic == 0x183 &&
1168 (eh->fh.f_flags & 0x3000) == 0x3000)
1169 {
1170 struct file * file;
1171 unsigned long loader;
1172
1173 allow_write_access(bprm->file);
1174 fput(bprm->file);
1175 bprm->file = NULL;
1176
b6a2fea3 1177 loader = bprm->vma->vm_end - sizeof(void *);
1da177e4
LT
1178
1179 file = open_exec("/sbin/loader");
1180 retval = PTR_ERR(file);
1181 if (IS_ERR(file))
1182 return retval;
1183
1184 /* Remember if the application is TASO. */
1185 bprm->sh_bang = eh->ah.entry < 0x100000000UL;
1186
1187 bprm->file = file;
1188 bprm->loader = loader;
1189 retval = prepare_binprm(bprm);
1190 if (retval<0)
1191 return retval;
1192 /* should call search_binary_handler recursively here,
1193 but it does not matter */
1194 }
1195 }
1196#endif
1197 retval = security_bprm_check(bprm);
1198 if (retval)
1199 return retval;
1200
1201 /* kernel module loader fixup */
1202 /* so we don't try to run modprobe in kernel space. */
1203 set_fs(USER_DS);
473ae30b
AV
1204
1205 retval = audit_bprm(bprm);
1206 if (retval)
1207 return retval;
1208
1da177e4
LT
1209 retval = -ENOENT;
1210 for (try=0; try<2; try++) {
1211 read_lock(&binfmt_lock);
e4dc1b14 1212 list_for_each_entry(fmt, &formats, lh) {
1da177e4
LT
1213 int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
1214 if (!fn)
1215 continue;
1216 if (!try_module_get(fmt->module))
1217 continue;
1218 read_unlock(&binfmt_lock);
1219 retval = fn(bprm, regs);
1220 if (retval >= 0) {
6341c393 1221 tracehook_report_exec(fmt, bprm, regs);
1da177e4
LT
1222 put_binfmt(fmt);
1223 allow_write_access(bprm->file);
1224 if (bprm->file)
1225 fput(bprm->file);
1226 bprm->file = NULL;
1227 current->did_exec = 1;
9f46080c 1228 proc_exec_connector(current);
1da177e4
LT
1229 return retval;
1230 }
1231 read_lock(&binfmt_lock);
1232 put_binfmt(fmt);
1233 if (retval != -ENOEXEC || bprm->mm == NULL)
1234 break;
1235 if (!bprm->file) {
1236 read_unlock(&binfmt_lock);
1237 return retval;
1238 }
1239 }
1240 read_unlock(&binfmt_lock);
1241 if (retval != -ENOEXEC || bprm->mm == NULL) {
1242 break;
1243#ifdef CONFIG_KMOD
1244 }else{
1245#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1246 if (printable(bprm->buf[0]) &&
1247 printable(bprm->buf[1]) &&
1248 printable(bprm->buf[2]) &&
1249 printable(bprm->buf[3]))
1250 break; /* -ENOEXEC */
1251 request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
1252#endif
1253 }
1254 }
1255 return retval;
1256}
1257
1258EXPORT_SYMBOL(search_binary_handler);
1259
08a6fac1
AV
1260void free_bprm(struct linux_binprm *bprm)
1261{
1262 free_arg_pages(bprm);
1263 kfree(bprm);
1264}
1265
1da177e4
LT
1266/*
1267 * sys_execve() executes a new program.
1268 */
1269int do_execve(char * filename,
1270 char __user *__user *argv,
1271 char __user *__user *envp,
1272 struct pt_regs * regs)
1273{
1274 struct linux_binprm *bprm;
1275 struct file *file;
3b125388 1276 struct files_struct *displaced;
1da177e4 1277 int retval;
1da177e4 1278
3b125388 1279 retval = unshare_files(&displaced);
fd8328be
AV
1280 if (retval)
1281 goto out_ret;
1282
1da177e4 1283 retval = -ENOMEM;
11b0b5ab 1284 bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1da177e4 1285 if (!bprm)
fd8328be 1286 goto out_files;
1da177e4
LT
1287
1288 file = open_exec(filename);
1289 retval = PTR_ERR(file);
1290 if (IS_ERR(file))
1291 goto out_kfree;
1292
1293 sched_exec();
1294
1da177e4
LT
1295 bprm->file = file;
1296 bprm->filename = filename;
1297 bprm->interp = filename;
1da177e4 1298
b6a2fea3
OW
1299 retval = bprm_mm_init(bprm);
1300 if (retval)
1301 goto out_file;
1da177e4 1302
b6a2fea3 1303 bprm->argc = count(argv, MAX_ARG_STRINGS);
1da177e4
LT
1304 if ((retval = bprm->argc) < 0)
1305 goto out_mm;
1306
b6a2fea3 1307 bprm->envc = count(envp, MAX_ARG_STRINGS);
1da177e4
LT
1308 if ((retval = bprm->envc) < 0)
1309 goto out_mm;
1310
1311 retval = security_bprm_alloc(bprm);
1312 if (retval)
1313 goto out;
1314
1315 retval = prepare_binprm(bprm);
1316 if (retval < 0)
1317 goto out;
1318
1319 retval = copy_strings_kernel(1, &bprm->filename, bprm);
1320 if (retval < 0)
1321 goto out;
1322
1323 bprm->exec = bprm->p;
1324 retval = copy_strings(bprm->envc, envp, bprm);
1325 if (retval < 0)
1326 goto out;
1327
1328 retval = copy_strings(bprm->argc, argv, bprm);
1329 if (retval < 0)
1330 goto out;
1331
7b34e428 1332 current->flags &= ~PF_KTHREAD;
1da177e4
LT
1333 retval = search_binary_handler(bprm,regs);
1334 if (retval >= 0) {
1da177e4
LT
1335 /* execve success */
1336 security_bprm_free(bprm);
1337 acct_update_integrals(current);
08a6fac1 1338 free_bprm(bprm);
3b125388
AV
1339 if (displaced)
1340 put_files_struct(displaced);
1da177e4
LT
1341 return retval;
1342 }
1343
1344out:
1da177e4
LT
1345 if (bprm->security)
1346 security_bprm_free(bprm);
1347
1348out_mm:
1349 if (bprm->mm)
b6a2fea3 1350 mmput (bprm->mm);
1da177e4
LT
1351
1352out_file:
1353 if (bprm->file) {
1354 allow_write_access(bprm->file);
1355 fput(bprm->file);
1356 }
1da177e4 1357out_kfree:
08a6fac1 1358 free_bprm(bprm);
1da177e4 1359
fd8328be 1360out_files:
3b125388
AV
1361 if (displaced)
1362 reset_files_struct(displaced);
1da177e4
LT
1363out_ret:
1364 return retval;
1365}
1366
1367int set_binfmt(struct linux_binfmt *new)
1368{
1369 struct linux_binfmt *old = current->binfmt;
1370
1371 if (new) {
1372 if (!try_module_get(new->module))
1373 return -1;
1374 }
1375 current->binfmt = new;
1376 if (old)
1377 module_put(old->module);
1378 return 0;
1379}
1380
1381EXPORT_SYMBOL(set_binfmt);
1382
1da177e4
LT
1383/* format_corename will inspect the pattern parameter, and output a
1384 * name into corename, which must have space for at least
1385 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
1386 */
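/*
 * Recognized specifiers (see the switch below): %% literal '%', %p pid,
 * %u uid, %g gid, %s signal number, %t dump time (seconds since the
 * Epoch), %h hostname, %e executable name, %c core size limit.  A leading
 * '|' means the rest of the pattern names a user-mode helper to pipe the
 * dump to.  Example (illustrative only): core_pattern = "core.%e.%p".
 */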
565b9b14 1387static int format_corename(char *corename, int nr_threads, long signr)
1da177e4 1388{
565b9b14
ON
1389 const char *pat_ptr = core_pattern;
1390 int ispipe = (*pat_ptr == '|');
1da177e4
LT
1391 char *out_ptr = corename;
1392 char *const out_end = corename + CORENAME_MAX_SIZE;
1393 int rc;
1394 int pid_in_pattern = 0;
1395
1396 /* Repeat as long as we have more pattern to process and more output
1397 space */
1398 while (*pat_ptr) {
1399 if (*pat_ptr != '%') {
1400 if (out_ptr == out_end)
1401 goto out;
1402 *out_ptr++ = *pat_ptr++;
1403 } else {
1404 switch (*++pat_ptr) {
1405 case 0:
1406 goto out;
1407 /* Double percent, output one percent */
1408 case '%':
1409 if (out_ptr == out_end)
1410 goto out;
1411 *out_ptr++ = '%';
1412 break;
1413 /* pid */
1414 case 'p':
1415 pid_in_pattern = 1;
1416 rc = snprintf(out_ptr, out_end - out_ptr,
b488893a 1417 "%d", task_tgid_vnr(current));
1da177e4
LT
1418 if (rc > out_end - out_ptr)
1419 goto out;
1420 out_ptr += rc;
1421 break;
1422 /* uid */
1423 case 'u':
1424 rc = snprintf(out_ptr, out_end - out_ptr,
1425 "%d", current->uid);
1426 if (rc > out_end - out_ptr)
1427 goto out;
1428 out_ptr += rc;
1429 break;
1430 /* gid */
1431 case 'g':
1432 rc = snprintf(out_ptr, out_end - out_ptr,
1433 "%d", current->gid);
1434 if (rc > out_end - out_ptr)
1435 goto out;
1436 out_ptr += rc;
1437 break;
1438 /* signal that caused the coredump */
1439 case 's':
1440 rc = snprintf(out_ptr, out_end - out_ptr,
1441 "%ld", signr);
1442 if (rc > out_end - out_ptr)
1443 goto out;
1444 out_ptr += rc;
1445 break;
1446 /* UNIX time of coredump */
1447 case 't': {
1448 struct timeval tv;
1449 do_gettimeofday(&tv);
1450 rc = snprintf(out_ptr, out_end - out_ptr,
1451 "%lu", tv.tv_sec);
1452 if (rc > out_end - out_ptr)
1453 goto out;
1454 out_ptr += rc;
1455 break;
1456 }
1457 /* hostname */
1458 case 'h':
1459 down_read(&uts_sem);
1460 rc = snprintf(out_ptr, out_end - out_ptr,
e9ff3990 1461 "%s", utsname()->nodename);
1da177e4
LT
1462 up_read(&uts_sem);
1463 if (rc > out_end - out_ptr)
1464 goto out;
1465 out_ptr += rc;
1466 break;
1467 /* executable */
1468 case 'e':
1469 rc = snprintf(out_ptr, out_end - out_ptr,
1470 "%s", current->comm);
1471 if (rc > out_end - out_ptr)
1472 goto out;
1473 out_ptr += rc;
1474 break;
74aadce9
NH
1475 /* core limit size */
1476 case 'c':
1477 rc = snprintf(out_ptr, out_end - out_ptr,
1478 "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
1479 if (rc > out_end - out_ptr)
1480 goto out;
1481 out_ptr += rc;
1482 break;
1da177e4
LT
1483 default:
1484 break;
1485 }
1486 ++pat_ptr;
1487 }
1488 }
1489 /* Backward compatibility with core_uses_pid:
1490 *
1491 * If core_pattern does not include a %p (as is the default)
1492 * and core_uses_pid is set, then .%pid will be appended to
c4bbafda
AC
1493 * the filename. Do not do this for piped commands. */
1494 if (!ispipe && !pid_in_pattern
565b9b14 1495 && (core_uses_pid || nr_threads)) {
1da177e4 1496 rc = snprintf(out_ptr, out_end - out_ptr,
b488893a 1497 ".%d", task_tgid_vnr(current));
1da177e4
LT
1498 if (rc > out_end - out_ptr)
1499 goto out;
1500 out_ptr += rc;
1501 }
c4bbafda 1502out:
1da177e4 1503 *out_ptr = 0;
c4bbafda 1504 return ispipe;
1da177e4
LT
1505}
1506
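/*
 * Mark @start's thread group for group exit and queue SIGKILL for every
 * other thread that still owns an mm; returns how many threads were
 * signalled.
 */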
8cd9c249 1507static int zap_process(struct task_struct *start)
aceecc04
ON
1508{
1509 struct task_struct *t;
8cd9c249 1510 int nr = 0;
281de339 1511
d5f70c00
ON
1512 start->signal->flags = SIGNAL_GROUP_EXIT;
1513 start->signal->group_stop_count = 0;
aceecc04
ON
1514
1515 t = start;
1516 do {
1517 if (t != current && t->mm) {
281de339
ON
1518 sigaddset(&t->pending.signal, SIGKILL);
1519 signal_wake_up(t, 1);
8cd9c249 1520 nr++;
aceecc04 1521 }
e4901f92 1522 } while_each_thread(start, t);
8cd9c249
ON
1523
1524 return nr;
aceecc04
ON
1525}
1526
dcf560c5 1527static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
8cd9c249 1528 struct core_state *core_state, int exit_code)
1da177e4
LT
1529{
1530 struct task_struct *g, *p;
5debfa6d 1531 unsigned long flags;
8cd9c249 1532 int nr = -EAGAIN;
dcf560c5
ON
1533
1534 spin_lock_irq(&tsk->sighand->siglock);
ed5d2cac 1535 if (!signal_group_exit(tsk->signal)) {
8cd9c249 1536 mm->core_state = core_state;
dcf560c5 1537 tsk->signal->group_exit_code = exit_code;
8cd9c249 1538 nr = zap_process(tsk);
1da177e4 1539 }
dcf560c5 1540 spin_unlock_irq(&tsk->sighand->siglock);
8cd9c249
ON
1541 if (unlikely(nr < 0))
1542 return nr;
1da177e4 1543
8cd9c249 1544 if (atomic_read(&mm->mm_users) == nr + 1)
5debfa6d 1545 goto done;
e4901f92
ON
1546 /*
1547 * We should find and kill all tasks which use this mm, and we should
999d9fc1 1548 * count them correctly into ->nr_threads. We don't take tasklist
e4901f92
ON
1549 * lock, but this is safe wrt:
1550 *
1551 * fork:
1552 * None of sub-threads can fork after zap_process(leader). All
1553 * processes which were created before this point should be
1554 * visible to zap_threads() because copy_process() adds the new
1555 * process to the tail of init_task.tasks list, and lock/unlock
1556 * of ->siglock provides a memory barrier.
1557 *
1558 * do_exit:
1559 * The caller holds mm->mmap_sem. This means that the task which
1560 * uses this mm can't pass exit_mm(), so it can't exit or clear
1561 * its ->mm.
1562 *
1563 * de_thread:
1564 * It does list_replace_rcu(&leader->tasks, &current->tasks),
1565 * we must see either old or new leader, this does not matter.
1566 * However, it can change p->sighand, so lock_task_sighand(p)
1567 * must be used. Since p->mm != NULL and we hold ->mmap_sem
1568 * it can't fail.
1569 *
1570 * Note also that "g" can be the old leader with ->mm == NULL
1571 * and already unhashed and thus removed from ->thread_group.
1572 * This is OK, __unhash_process()->list_del_rcu() does not
1573 * clear the ->next pointer, we will find the new leader via
1574 * next_thread().
1575 */
7b1c6154 1576 rcu_read_lock();
aceecc04 1577 for_each_process(g) {
5debfa6d
ON
1578 if (g == tsk->group_leader)
1579 continue;
15b9f360
ON
1580 if (g->flags & PF_KTHREAD)
1581 continue;
aceecc04
ON
1582 p = g;
1583 do {
1584 if (p->mm) {
15b9f360 1585 if (unlikely(p->mm == mm)) {
5debfa6d 1586 lock_task_sighand(p, &flags);
8cd9c249 1587 nr += zap_process(p);
5debfa6d
ON
1588 unlock_task_sighand(p, &flags);
1589 }
aceecc04
ON
1590 break;
1591 }
e4901f92 1592 } while_each_thread(g, p);
aceecc04 1593 }
7b1c6154 1594 rcu_read_unlock();
5debfa6d 1595done:
c5f1cc8c 1596 atomic_set(&core_state->nr_threads, nr);
8cd9c249 1597 return nr;
1da177e4
LT
1598}
1599
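/*
 * Kill the other users of this mm via zap_threads() and then wait on
 * core_state->startup until they have all checked in; returns the number
 * of core waiters, or a negative error if another group exit won the race.
 */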
9d5b327b 1600static int coredump_wait(int exit_code, struct core_state *core_state)
1da177e4 1601{
dcf560c5
ON
1602 struct task_struct *tsk = current;
1603 struct mm_struct *mm = tsk->mm;
dcf560c5 1604 struct completion *vfork_done;
2384f55f 1605 int core_waiters;
1da177e4 1606
9d5b327b 1607 init_completion(&core_state->startup);
b564daf8
ON
1608 core_state->dumper.task = tsk;
1609 core_state->dumper.next = NULL;
9d5b327b 1610 core_waiters = zap_threads(tsk, mm, core_state, exit_code);
2384f55f
ON
1611 up_write(&mm->mmap_sem);
1612
dcf560c5
ON
1613 if (unlikely(core_waiters < 0))
1614 goto fail;
1615
1616 /*
1617 * Make sure nobody is waiting for us to release the VM,
1618 * otherwise we can deadlock when we wait on each other
1619 */
1620 vfork_done = tsk->vfork_done;
1621 if (vfork_done) {
1622 tsk->vfork_done = NULL;
1623 complete(vfork_done);
1624 }
1625
2384f55f 1626 if (core_waiters)
9d5b327b 1627 wait_for_completion(&core_state->startup);
dcf560c5 1628fail:
dcf560c5 1629 return core_waiters;
1da177e4
LT
1630}
1631
a94e2d40
ON
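/*
 * Wake every thread parked on mm->core_state->dumper once the dump has
 * been written, then clear mm->core_state.
 */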
1632static void coredump_finish(struct mm_struct *mm)
1633{
1634 struct core_thread *curr, *next;
1635 struct task_struct *task;
1636
1637 next = mm->core_state->dumper.next;
1638 while ((curr = next) != NULL) {
1639 next = curr->next;
1640 task = curr->task;
1641 /*
1642 * see exit_mm(), curr->task must not see
1643 * ->task == NULL before we read ->next.
1644 */
1645 smp_mb();
1646 curr->task = NULL;
1647 wake_up_process(task);
1648 }
1649
1650 mm->core_state = NULL;
1651}
1652
6c5d5238
KH
1653/*
1654 * set_dumpable converts traditional three-value dumpable to two flags and
1655 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
1656 * these bits are not changed atomically. So get_dumpable can observe the
1657 * intermediate state. To avoid unexpected behavior, get_dumpable
1658 * returns either the old or the new dumpable value by paying attention to
1659 * the order in which the bits are modified.
1660 *
1661 * dumpable | mm->flags (binary)
1662 * old new | initial interim final
1663 * ---------+-----------------------
1664 * 0 1 | 00 01 01
1665 * 0 2 | 00 10(*) 11
1666 * 1 0 | 01 00 00
1667 * 1 2 | 01 11 11
1668 * 2 0 | 11 10(*) 00
1669 * 2 1 | 11 11 01
1670 *
1671 * (*) get_dumpable regards interim value of 10 as 11.
1672 */
1673void set_dumpable(struct mm_struct *mm, int value)
1674{
1675 switch (value) {
1676 case 0:
1677 clear_bit(MMF_DUMPABLE, &mm->flags);
1678 smp_wmb();
1679 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1680 break;
1681 case 1:
1682 set_bit(MMF_DUMPABLE, &mm->flags);
1683 smp_wmb();
1684 clear_bit(MMF_DUMP_SECURELY, &mm->flags);
1685 break;
1686 case 2:
1687 set_bit(MMF_DUMP_SECURELY, &mm->flags);
1688 smp_wmb();
1689 set_bit(MMF_DUMPABLE, &mm->flags);
1690 break;
1691 }
1692}
6c5d5238
KH
1693
1694int get_dumpable(struct mm_struct *mm)
1695{
1696 int ret;
1697
1698 ret = mm->flags & 0x3;
1699 return (ret >= 2) ? 2 : ret;
1700}
1701
1da177e4
LT
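/*
 * Write a core dump for the current process: check that the binfmt
 * provides ->core_dump() and that the mm is dumpable, stop all users of
 * the mm via coredump_wait(), expand core_pattern into a file name (or a
 * pipe to a user-mode helper) and hand the file to the binfmt's core dump
 * routine, honouring RLIMIT_CORE for plain files.
 */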
1702int do_coredump(long signr, int exit_code, struct pt_regs * regs)
1703{
9d5b327b 1704 struct core_state core_state;
1da177e4
LT
1705 char corename[CORENAME_MAX_SIZE + 1];
1706 struct mm_struct *mm = current->mm;
1707 struct linux_binfmt * binfmt;
1708 struct inode * inode;
1709 struct file * file;
1710 int retval = 0;
d6e71144
AC
1711 int fsuid = current->fsuid;
1712 int flag = 0;
d025c9db 1713 int ispipe = 0;
7dc0b22e 1714 unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
74aadce9
NH
1715 char **helper_argv = NULL;
1716 int helper_argc = 0;
1717 char *delimit;
1da177e4 1718
0a4ff8c2
SG
1719 audit_core_dumps(signr);
1720
1da177e4
LT
1721 binfmt = current->binfmt;
1722 if (!binfmt || !binfmt->core_dump)
1723 goto fail;
1724 down_write(&mm->mmap_sem);
00ec99da
RM
1725 /*
1726 * If another thread got here first, or we are not dumpable, bail out.
1727 */
999d9fc1 1728 if (mm->core_state || !get_dumpable(mm)) {
1da177e4
LT
1729 up_write(&mm->mmap_sem);
1730 goto fail;
1731 }
d6e71144
AC
1732
1733 /*
1734 * We cannot trust fsuid as being the "true" uid of the
1735 * process nor do we know its entire history. We only know it
1736 * was tainted so we dump it as root in mode 2.
1737 */
6c5d5238 1738 if (get_dumpable(mm) == 2) { /* Setuid core dump mode */
d6e71144
AC
1739 flag = O_EXCL; /* Stop rewrite attacks */
1740 current->fsuid = 0; /* Dump root private */
1741 }
1291cf41 1742
9d5b327b 1743 retval = coredump_wait(exit_code, &core_state);
dcf560c5 1744 if (retval < 0)
1291cf41 1745 goto fail;
1da177e4
LT
1746
1747 /*
1748 * Clear any false indication of pending signals that might
1749 * be seen by the filesystem code called to write the core file.
1750 */
1da177e4
LT
1751 clear_thread_flag(TIF_SIGPENDING);
1752
1da177e4
LT
1753 /*
1754 * lock_kernel() because format_corename() is controlled by sysctl, which
1755 * uses lock_kernel()
1756 */
1757 lock_kernel();
565b9b14 1758 ispipe = format_corename(corename, retval, signr);
1da177e4 1759 unlock_kernel();
7dc0b22e
NH
1760 /*
1761 * Don't bother to check the RLIMIT_CORE value if core_pattern points
1762 * to a pipe. Since we're not writing directly to the filesystem
1763 * RLIMIT_CORE doesn't really apply, as no actual core file will be
1764 * created unless the pipe reader chooses to write out the core file,
1765 * at which point file size limits and permissions will be imposed
1766 * as they are for any other process.
1767 */
74aadce9 1768 if ((!ispipe) && (core_limit < binfmt->min_coredump))
7dc0b22e
NH
1769 goto fail_unlock;
1770
c4bbafda 1771 if (ispipe) {
74aadce9
NH
1772 helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
1773 /* Terminate the string before the first option */
1774 delimit = strchr(corename, ' ');
1775 if (delimit)
1776 *delimit = '\0';
32321137
NH
1777 delimit = strrchr(helper_argv[0], '/');
1778 if (delimit)
1779 delimit++;
1780 else
1781 delimit = helper_argv[0];
1782 if (!strcmp(delimit, current->comm)) {
1783 printk(KERN_NOTICE "Recursive core dump detected, "
1784 "aborting\n");
1785 goto fail_unlock;
1786 }
1787
1788 core_limit = RLIM_INFINITY;
1789
d025c9db 1790 /* SIGPIPE can happen, but it's just never processed */
32321137
NH
1791 if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
1792 &file)) {
d025c9db
AK
1793 printk(KERN_INFO "Core dump to %s pipe failed\n",
1794 corename);
1795 goto fail_unlock;
1796 }
d025c9db
AK
1797 } else
1798 file = filp_open(corename,
6d4df677
AD
1799 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
1800 0600);
1da177e4
LT
1801 if (IS_ERR(file))
1802 goto fail_unlock;
0f7fc9e4 1803 inode = file->f_path.dentry->d_inode;
1da177e4
LT
1804 if (inode->i_nlink > 1)
1805 goto close_fail; /* multiple links - don't dump */
0f7fc9e4 1806 if (!ispipe && d_unhashed(file->f_path.dentry))
1da177e4
LT
1807 goto close_fail;
1808
d025c9db
AK
1809 /* AK: actually I see no reason to not allow this for named pipes etc.,
1810 but keep the previous behaviour for now. */
1811 if (!ispipe && !S_ISREG(inode->i_mode))
1da177e4 1812 goto close_fail;
c46f739d
IM
1813 /*
1814 * Dont allow local users get cute and trick others to coredump
1815 * into their pre-created files:
1816 */
1817 if (inode->i_uid != current->fsuid)
1818 goto close_fail;
1da177e4
LT
1819 if (!file->f_op)
1820 goto close_fail;
1821 if (!file->f_op->write)
1822 goto close_fail;
0f7fc9e4 1823 if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
1da177e4
LT
1824 goto close_fail;
1825
7dc0b22e 1826 retval = binfmt->core_dump(signr, regs, file, core_limit);
1da177e4
LT
1827
1828 if (retval)
1829 current->signal->group_exit_code |= 0x80;
1830close_fail:
1831 filp_close(file, NULL);
1832fail_unlock:
74aadce9
NH
1833 if (helper_argv)
1834 argv_free(helper_argv);
1835
d6e71144 1836 current->fsuid = fsuid;
a94e2d40 1837 coredump_finish(mm);
1da177e4
LT
1838fail:
1839 return retval;
1840}