/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/smp_lock.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/proc_fs.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include "internal.h"

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
int suid_dumpable = 0;

/* The maximal length of core_pattern is also specified in sysctl.c */

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

int register_binfmt(struct linux_binfmt * fmt)
{
	if (!fmt)
		return -EINVAL;
	write_lock(&binfmt_lock);
	list_add(&fmt->lh, &formats);
	write_unlock(&binfmt_lock);
	return 0;
}

EXPORT_SYMBOL(register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
	write_lock(&binfmt_lock);
	list_del(&fmt->lh);
	write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
	module_put(fmt->module);
}

/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the address to load from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
	struct file *file;
	struct nameidata nd;
	char *tmp = getname(library);
	int error = PTR_ERR(tmp);

	if (!IS_ERR(tmp)) {
		error = path_lookup_open(AT_FDCWD, tmp,
					 LOOKUP_FOLLOW, &nd,
					 FMODE_READ|FMODE_EXEC);
		putname(tmp);
	}
	if (error)
		goto out;

	error = -EINVAL;
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto exit;

	error = -EACCES;
	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	error = inode_permission(nd.path.dentry->d_inode,
				 MAY_READ | MAY_EXEC | MAY_OPEN);
	if (error)
		goto exit;

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto out;

	fsnotify_open(file->f_path.dentry);

	error = -ENOEXEC;
	if (file->f_op) {
		struct linux_binfmt * fmt;

		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			if (!fmt->load_shlib)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			error = fmt->load_shlib(file);
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (error != -ENOEXEC)
				break;
		}
		read_unlock(&binfmt_lock);
	}
	fput(file);
out:
	return error;
exit:
	release_open_intent(&nd);
	path_put(&nd.path);
	goto out;
}

#ifdef CONFIG_MMU

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;
	int ret;

#ifdef CONFIG_STACK_GROWSUP
	if (write) {
		ret = expand_stack_downwards(bprm->vma, pos);
		if (ret < 0)
			return NULL;
	}
#endif
	ret = get_user_pages(current, bprm->mm, pos,
			1, write, 1, &page, NULL);
	if (ret <= 0)
		return NULL;

	if (write) {
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
		struct rlimit *rlim;

		/*
		 * We've historically supported up to 32 pages (ARG_MAX)
		 * of argument strings even with small stacks
		 */
		if (size <= ARG_MAX)
			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
		 * This ensures that:
		 *  - the remaining binfmt code will not run out of stack space,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
		rlim = current->signal->rlim;
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
		}
	}

	return page;
}

static void put_arg_page(struct page *page)
{
	put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
	flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;
	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	err = insert_vm_struct(mm, vma);
	if (err)
		goto err;

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	up_write(&mm->mmap_sem);
	bprm->vma = NULL;
	kmem_cache_free(vm_area_cachep, vma);
	return err;
}

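/* Reject any single argument/environment string longer than MAX_ARG_STRLEN. */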
static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= MAX_ARG_STRLEN;
}

#else

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		int write)
{
	struct page *page;

	page = bprm->page[pos / PAGE_SIZE];
	if (!page && write) {
		page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
		if (!page)
			return NULL;
		bprm->page[pos / PAGE_SIZE] = page;
	}

	return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
	if (bprm->page[i]) {
		__free_page(bprm->page[i]);
		bprm->page[i] = NULL;
	}
}

static void free_arg_pages(struct linux_binprm *bprm)
{
	int i;

	for (i = 0; i < MAX_ARG_PAGES; i++)
		free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
		struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
	bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
	return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
	return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
int bprm_mm_init(struct linux_binprm *bprm)
{
	int err;
	struct mm_struct *mm = NULL;

	bprm->mm = mm = mm_alloc();
	err = -ENOMEM;
	if (!mm)
		goto err;

	err = init_new_context(current, mm);
	if (err)
		goto err;

	err = __bprm_mm_init(bprm);
	if (err)
		goto err;

	return 0;

err:
	if (mm) {
		bprm->mm = NULL;
		mmdrop(mm);
	}

	return err;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(char __user * __user * argv, int max)
{
	int i = 0;

	if (argv != NULL) {
		for (;;) {
			char __user * p;

			if (get_user(p, argv))
				return -EFAULT;
			if (!p)
				break;
			argv++;
			if (i++ >= max)
				return -E2BIG;
			cond_resched();
		}
	}
	return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, char __user * __user * argv,
			struct linux_binprm *bprm)
{
	struct page *kmapped_page = NULL;
	char *kaddr = NULL;
	unsigned long kpos = 0;
	int ret;

	while (argc-- > 0) {
		char __user *str;
		int len;
		unsigned long pos;

		if (get_user(str, argv+argc) ||
				!(len = strnlen_user(str, MAX_ARG_STRLEN))) {
			ret = -EFAULT;
			goto out;
		}

		if (!valid_arg_len(bprm, len)) {
			ret = -E2BIG;
			goto out;
		}

		/* We're going to work our way backwards. */
		pos = bprm->p;
		str += len;
		bprm->p -= len;

		while (len > 0) {
			int offset, bytes_to_copy;

			offset = pos % PAGE_SIZE;
			if (offset == 0)
				offset = PAGE_SIZE;

			bytes_to_copy = offset;
			if (bytes_to_copy > len)
				bytes_to_copy = len;

			offset -= bytes_to_copy;
			pos -= bytes_to_copy;
			str -= bytes_to_copy;
			len -= bytes_to_copy;

			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
				struct page *page;

				page = get_arg_page(bprm, pos, 1);
				if (!page) {
					ret = -E2BIG;
					goto out;
				}

				if (kmapped_page) {
					flush_kernel_dcache_page(kmapped_page);
					kunmap(kmapped_page);
					put_arg_page(kmapped_page);
				}
				kmapped_page = page;
				kaddr = kmap(kmapped_page);
				kpos = pos & PAGE_MASK;
				flush_arg_page(bprm, kpos, kmapped_page);
			}
			if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
	ret = 0;
out:
	if (kmapped_page) {
		flush_kernel_dcache_page(kmapped_page);
		kunmap(kmapped_page);
		put_arg_page(kmapped_page);
	}
	return ret;
}

/*
 * Like copy_strings, but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, char **argv, struct linux_binprm *bprm)
{
	int r;
	mm_segment_t oldfs = get_fs();
	set_fs(KERNEL_DS);
	r = copy_strings(argc, (char __user * __user *)argv, bprm);
	set_fs(oldfs);
	return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_start = vma->vm_start;
	unsigned long old_end = vma->vm_end;
	unsigned long length = old_end - old_start;
	unsigned long new_start = old_start - shift;
	unsigned long new_end = old_end - shift;
	struct mmu_gather *tlb;

	BUG_ON(new_start > new_end);

	/*
	 * ensure there are no vmas between where we want to go
	 * and where we are
	 */
	if (vma != find_vma(mm, new_start))
		return -EFAULT;

	/*
	 * cover the whole range: [new_start, old_end)
	 */
	vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL);

	/*
	 * move the page tables downwards, on failure we rely on
	 * process cleanup to remove whatever mess we made.
	 */
	if (length != move_page_tables(vma, old_start,
				       vma, new_start, length))
		return -ENOMEM;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	if (new_end > old_start) {
		/*
		 * when the old and new regions overlap clear from new_end.
		 */
		free_pgd_range(tlb, new_end, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	} else {
		/*
		 * otherwise, clean from old_start; this is done to not touch
		 * the address space in [new_end, old_start), since some
		 * architectures have constraints on va-space that make this
		 * illegal (IA64) - for the others it's just a little faster.
		 */
		free_pgd_range(tlb, old_start, old_end, new_end,
			vma->vm_next ? vma->vm_next->vm_start : 0);
	}
	tlb_finish_mmu(tlb, new_end, old_end);

	/*
	 * shrink the vma to just the new range.
	 */
	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

	return 0;
}

#define EXTRA_STACK_VM_PAGES	20	/* random */

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
		    unsigned long stack_top,
		    int executable_stack)
{
	unsigned long ret;
	unsigned long stack_shift;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = bprm->vma;
	struct vm_area_struct *prev = NULL;
	unsigned long vm_flags;
	unsigned long stack_base;

#ifdef CONFIG_STACK_GROWSUP
	/* Limit stack size to 1GB */
	stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
	if (stack_base > (1 << 30))
		stack_base = 1 << 30;

	/* Make sure we didn't let the argument array grow too large. */
	if (vma->vm_end - vma->vm_start > stack_base)
		return -ENOMEM;

	stack_base = PAGE_ALIGN(stack_top - stack_base);

	stack_shift = vma->vm_start - stack_base;
	mm->arg_start = bprm->p - stack_shift;
	bprm->p = vma->vm_end - stack_shift;
#else
	stack_top = arch_align_stack(stack_top);
	stack_top = PAGE_ALIGN(stack_top);
	stack_shift = vma->vm_end - stack_top;

	bprm->p -= stack_shift;
	mm->arg_start = bprm->p;
#endif

	if (bprm->loader)
		bprm->loader -= stack_shift;
	bprm->exec -= stack_shift;

	down_write(&mm->mmap_sem);
	vm_flags = VM_STACK_FLAGS;

	/*
	 * Adjust stack execute permissions; explicitly enable for
	 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
	 * (arch default) otherwise.
	 */
	if (unlikely(executable_stack == EXSTACK_ENABLE_X))
		vm_flags |= VM_EXEC;
	else if (executable_stack == EXSTACK_DISABLE_X)
		vm_flags &= ~VM_EXEC;
	vm_flags |= mm->def_flags;

	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
			vm_flags);
	if (ret)
		goto out_unlock;
	BUG_ON(prev != vma);

	/* Move stack pages down in memory. */
	if (stack_shift) {
		ret = shift_arg_pages(vma, stack_shift);
		if (ret) {
			up_write(&mm->mmap_sem);
			return ret;
		}
	}

#ifdef CONFIG_STACK_GROWSUP
	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#else
	stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
#endif
	ret = expand_stack(vma, stack_base);
	if (ret)
		ret = -EFAULT;

out_unlock:
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(setup_arg_pages);

#endif /* CONFIG_MMU */

struct file *open_exec(const char *name)
{
	struct nameidata nd;
	struct file *file;
	int err;

	err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
				FMODE_READ|FMODE_EXEC);
	if (err)
		goto out;

	err = -EACCES;
	if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
		goto out_path_put;

	if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
		goto out_path_put;

	err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN);
	if (err)
		goto out_path_put;

	file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
	if (IS_ERR(file))
		return file;

	fsnotify_open(file->f_path.dentry);

	err = deny_write_access(file);
	if (err) {
		fput(file);
		goto out;
	}

	return file;

 out_path_put:
	release_open_intent(&nd);
	path_put(&nd.path);
 out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(open_exec);

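/*
 * Read @count bytes from @file at @offset into a kernel buffer, temporarily
 * widening the address limit so vfs_read() accepts the kernel pointer.
 */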
int kernel_read(struct file *file, unsigned long offset,
	char *addr, unsigned long count)
{
	mm_segment_t old_fs;
	loff_t pos = offset;
	int result;

	old_fs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	result = vfs_read(file, (void __user *)addr, count, &pos);
	set_fs(old_fs);
	return result;
}

EXPORT_SYMBOL(kernel_read);

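/*
 * Install the new mm built for this exec and drop the old one (if any),
 * bailing out if a core dump of the old mm is in progress.
 */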
static int exec_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk;
	struct mm_struct * old_mm, *active_mm;

	/* Notify parent that we're no longer interested in the old VM */
	tsk = current;
	old_mm = current->mm;
	mm_release(tsk, old_mm);

	if (old_mm) {
		/*
		 * Make sure that if there is a core dump in progress
		 * for the old mm, we get out and die instead of going
		 * through with the exec.  We must hold mmap_sem around
		 * checking core_state and changing tsk->mm.
		 */
		down_read(&old_mm->mmap_sem);
		if (unlikely(old_mm->core_state)) {
			up_read(&old_mm->mmap_sem);
			return -EINTR;
		}
	}
	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	activate_mm(active_mm, mm);
	task_unlock(tsk);
	arch_pick_mmap_layout(mm);
	if (old_mm) {
		up_read(&old_mm->mmap_sem);
		BUG_ON(active_mm != old_mm);
		mm_update_next_owner(old_mm);
		mmput(old_mm);
		return 0;
	}
	mmdrop(active_mm);
	return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *oldsighand = tsk->sighand;
	spinlock_t *lock = &oldsighand->siglock;
	int count;

	if (thread_group_empty(tsk))
		goto no_thread_group;

	/*
	 * Kill all other threads in the thread group.
	 */
	spin_lock_irq(lock);
	if (signal_group_exit(sig)) {
		/*
		 * Another group action in progress, just
		 * return so that the signal is processed.
		 */
		spin_unlock_irq(lock);
		return -EAGAIN;
	}
	sig->group_exit_task = tsk;
	zap_other_threads(tsk);

	/* Account for the thread group leader hanging around: */
	count = thread_group_leader(tsk) ? 1 : 2;
	sig->notify_count = count;
	while (atomic_read(&sig->count) > count) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		schedule();
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);

	/*
	 * At this point all other threads have exited, all we have to
	 * do is to wait for the thread group leader to become inactive,
	 * and to assume its PID:
	 */
	if (!thread_group_leader(tsk)) {
		struct task_struct *leader = tsk->group_leader;

		sig->notify_count = -1;	/* for exit_notify() */
		for (;;) {
			write_lock_irq(&tasklist_lock);
			if (likely(leader->exit_state))
				break;
			__set_current_state(TASK_UNINTERRUPTIBLE);
			write_unlock_irq(&tasklist_lock);
			schedule();
		}

		/*
		 * The only record we have of the real-time age of a
		 * process, regardless of execs it's done, is start_time.
		 * All the past CPU time is accumulated in signal_struct
		 * from sister threads now dead.  But in this non-leader
		 * exec, nothing survives from the original leader thread,
		 * whose birth marks the true age of this process now.
		 * When we take on its identity by switching to its PID, we
		 * also take its birthdate (always earlier than our own).
		 */
		tsk->start_time = leader->start_time;

		BUG_ON(!same_thread_group(leader, tsk));
		BUG_ON(has_group_leader_pid(tsk));
		/*
		 * An exec() starts a new thread group with the
		 * TGID of the previous thread group. Rehash the
		 * two threads with a switched PID, and release
		 * the former thread group leader:
		 */

		/* Become a process group leader with the old leader's pid.
		 * The old leader becomes a thread of this thread group.
		 * Note: The old leader also uses this pid until release_task
		 * is called.  Odd but simple and correct.
		 */
		detach_pid(tsk, PIDTYPE_PID);
		tsk->pid = leader->pid;
		attach_pid(tsk, PIDTYPE_PID, task_pid(leader));
		transfer_pid(leader, tsk, PIDTYPE_PGID);
		transfer_pid(leader, tsk, PIDTYPE_SID);
		list_replace_rcu(&leader->tasks, &tsk->tasks);

		tsk->group_leader = tsk;
		leader->group_leader = tsk;

		tsk->exit_signal = SIGCHLD;

		BUG_ON(leader->exit_state != EXIT_ZOMBIE);
		leader->exit_state = EXIT_DEAD;
		write_unlock_irq(&tasklist_lock);

		release_task(leader);
	}

	sig->group_exit_task = NULL;
	sig->notify_count = 0;

no_thread_group:
	exit_itimers(sig);
	flush_itimer_signals();

	if (atomic_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		atomic_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}

	BUG_ON(!thread_group_leader(tsk));
	return 0;
}

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */
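/* Close every file descriptor that is marked close-on-exec. */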
static void flush_old_files(struct files_struct * files)
{
	long j = -1;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	for (;;) {
		unsigned long set, i;

		j++;
		i = j * __NFDBITS;
		fdt = files_fdtable(files);
		if (i >= fdt->max_fds)
			break;
		set = fdt->close_on_exec->fds_bits[j];
		if (!set)
			continue;
		fdt->close_on_exec->fds_bits[j] = 0;
		spin_unlock(&files->file_lock);
		for ( ; set ; i++, set >>= 1) {
			if (set & 1) {
				sys_close(i);
			}
		}
		spin_lock(&files->file_lock);

	}
	spin_unlock(&files->file_lock);
}

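/*
 * get_task_comm()/set_task_comm() copy ->comm in and out under task_lock()
 * so concurrent readers and writers do not race on the name.
 */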
char *get_task_comm(char *buf, struct task_struct *tsk)
{
	/* buf must be at least sizeof(tsk->comm) in size */
	task_lock(tsk);
	strncpy(buf, tsk->comm, sizeof(tsk->comm));
	task_unlock(tsk);
	return buf;
}

void set_task_comm(struct task_struct *tsk, char *buf)
{
	task_lock(tsk);
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
}

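/*
 * Flush the state left over from the old program: become single-threaded,
 * switch to the new mm, update the comm name and dumpability, and reset
 * signal handlers and close-on-exec files.
 */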
int flush_old_exec(struct linux_binprm * bprm)
{
	char * name;
	int i, ch, retval;
	char tcomm[sizeof(current->comm)];

	/*
	 * Make sure we have a private signal table and that
	 * we are unassociated from the previous thread group.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;

	set_mm_exe_file(bprm->mm, bprm->file);

	/*
	 * Release all of the old mmap stuff
	 */
	retval = exec_mmap(bprm->mm);
	if (retval)
		goto out;

	bprm->mm = NULL;		/* We're using it now */

	/* This is the point of no return */
	current->sas_ss_sp = current->sas_ss_size = 0;

	if (current_euid() == current_uid() && current_egid() == current_gid())
		set_dumpable(current->mm, 1);
	else
		set_dumpable(current->mm, suid_dumpable);

	name = bprm->filename;

	/* Copies the binary name from after last slash */
	for (i = 0; (ch = *(name++)) != '\0';) {
		if (ch == '/')
			i = 0; /* overwrite what we wrote */
		else
			if (i < (sizeof(tcomm) - 1))
				tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
	set_task_comm(current, tcomm);

	current->flags &= ~PF_RANDOMIZE;
	flush_thread();

	/* Set the new mm task size. We have to do that late because it may
	 * depend on TIF_32BIT which is only updated in flush_thread() on
	 * some architectures like powerpc
	 */
	current->mm->task_size = TASK_SIZE;

	/* install the new credentials */
	if (bprm->cred->uid != current_euid() ||
	    bprm->cred->gid != current_egid()) {
		current->pdeath_signal = 0;
	} else if (file_permission(bprm->file, MAY_READ) ||
		   bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) {
		set_dumpable(current->mm, suid_dumpable);
	}

	current->personality &= ~bprm->per_clear;

	/* An exec changes our domain. We are no longer part of the thread
	   group */

	current->self_exec_id++;

	flush_signal_handlers(current, 0);
	flush_old_files(current->files);

	return 0;

out:
	return retval;
}

EXPORT_SYMBOL(flush_old_exec);

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
	security_bprm_committing_creds(bprm);

	commit_creds(bprm->cred);
	bprm->cred = NULL;

	/* cred_exec_mutex must be held at least to this point to prevent
	 * ptrace_attach() from altering our determination of the task's
	 * credentials; any time after this it may be unlocked */

	security_bprm_committed_creds(bprm);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold current->cred_exec_mutex to protect against
 *   PTRACE_ATTACH
 */
void check_unsafe_exec(struct linux_binprm *bprm, struct files_struct *files)
{
	struct task_struct *p = current, *t;
	unsigned long flags;
	unsigned n_fs, n_files, n_sighand;

	bprm->unsafe = tracehook_unsafe_exec(p);

	n_fs = 1;
	n_files = 1;
	n_sighand = 1;
	lock_task_sighand(p, &flags);
	for (t = next_thread(p); t != p; t = next_thread(t)) {
		if (t->fs == p->fs)
			n_fs++;
		if (t->files == files)
			n_files++;
		n_sighand++;
	}

	if (atomic_read(&p->fs->count) > n_fs ||
	    atomic_read(&p->files->count) > n_files ||
	    atomic_read(&p->sighand->count) > n_sighand)
		bprm->unsafe |= LSM_UNSAFE_SHARE;

	unlock_task_sighand(p, &flags);
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
	umode_t mode;
	struct inode * inode = bprm->file->f_path.dentry->d_inode;
	int retval;

	mode = inode->i_mode;
	if (bprm->file->f_op == NULL)
		return -EACCES;

	/* clear any previous set[ug]id data from a previous binary */
	bprm->cred->euid = current_euid();
	bprm->cred->egid = current_egid();

	if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
		/* Set-uid? */
		if (mode & S_ISUID) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->euid = inode->i_uid;
		}

		/* Set-gid? */
		/*
		 * If setgid is set but no group execute bit then this
		 * is a candidate for mandatory locking, not a setgid
		 * executable.
		 */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
			bprm->per_clear |= PER_CLEAR_ON_SETID;
			bprm->cred->egid = inode->i_gid;
		}
	}

	/* fill in binprm security blob */
	retval = security_bprm_set_creds(bprm);
	if (retval)
		return retval;
	bprm->cred_prepared = 1;

	memset(bprm->buf, 0, BINPRM_BUF_SIZE);
	return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
	int ret = 0;
	unsigned long offset;
	char *kaddr;
	struct page *page;

	if (!bprm->argc)
		return 0;

	do {
		offset = bprm->p & ~PAGE_MASK;
		page = get_arg_page(bprm, bprm->p, 0);
		if (!page) {
			ret = -EFAULT;
			goto out;
		}
		kaddr = kmap_atomic(page, KM_USER0);

		for (; offset < PAGE_SIZE && kaddr[offset];
				offset++, bprm->p++)
			;

		kunmap_atomic(kaddr, KM_USER0);
		put_arg_page(page);

		if (offset == PAGE_SIZE)
			free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
	} while (offset == PAGE_SIZE);

	bprm->p++;
	bprm->argc--;
	ret = 0;

out:
	return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

/*
 * cycle through the list of binary format handlers until one recognizes
 * the image
 */
int search_binary_handler(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned int depth = bprm->recursion_depth;
	int try, retval;
	struct linux_binfmt *fmt;

	retval = security_bprm_check(bprm);
	if (retval)
		return retval;

	/* kernel module loader fixup */
	/* so we don't try to run modprobe in kernel space. */
	set_fs(USER_DS);

	retval = audit_bprm(bprm);
	if (retval)
		return retval;

	retval = -ENOENT;
	for (try = 0; try < 2; try++) {
		read_lock(&binfmt_lock);
		list_for_each_entry(fmt, &formats, lh) {
			int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
			if (!fn)
				continue;
			if (!try_module_get(fmt->module))
				continue;
			read_unlock(&binfmt_lock);
			retval = fn(bprm, regs);
			/*
			 * Restore the depth counter to its starting value
			 * in this call, so we don't have to rely on every
			 * load_binary function to restore it on return.
			 */
			bprm->recursion_depth = depth;
			if (retval >= 0) {
				if (depth == 0)
					tracehook_report_exec(fmt, bprm, regs);
				put_binfmt(fmt);
				allow_write_access(bprm->file);
				if (bprm->file)
					fput(bprm->file);
				bprm->file = NULL;
				current->did_exec = 1;
				proc_exec_connector(current);
				return retval;
			}
			read_lock(&binfmt_lock);
			put_binfmt(fmt);
			if (retval != -ENOEXEC || bprm->mm == NULL)
				break;
			if (!bprm->file) {
				read_unlock(&binfmt_lock);
				return retval;
			}
		}
		read_unlock(&binfmt_lock);
		if (retval != -ENOEXEC || bprm->mm == NULL) {
			break;
#ifdef CONFIG_MODULES
		} else {
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
			if (printable(bprm->buf[0]) &&
			    printable(bprm->buf[1]) &&
			    printable(bprm->buf[2]) &&
			    printable(bprm->buf[3]))
				break; /* -ENOEXEC */
			request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
#endif
		}
	}
	return retval;
}

EXPORT_SYMBOL(search_binary_handler);

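/*
 * Release a linux_binprm: free any remaining argument pages and drop the
 * credentials that were prepared but never committed.
 */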
void free_bprm(struct linux_binprm *bprm)
{
	free_arg_pages(bprm);
	if (bprm->cred)
		abort_creds(bprm->cred);
	kfree(bprm);
}

/*
 * sys_execve() executes a new program.
 */
int do_execve(char * filename,
	char __user *__user *argv,
	char __user *__user *envp,
	struct pt_regs * regs)
{
	struct linux_binprm *bprm;
	struct file *file;
	struct files_struct *displaced;
	int retval;

	retval = unshare_files(&displaced);
	if (retval)
		goto out_ret;

	retval = -ENOMEM;
	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
	if (!bprm)
		goto out_files;

	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
	if (retval < 0)
		goto out_free;

	retval = -ENOMEM;
	bprm->cred = prepare_exec_creds();
	if (!bprm->cred)
		goto out_unlock;
	check_unsafe_exec(bprm, displaced);

	file = open_exec(filename);
	retval = PTR_ERR(file);
	if (IS_ERR(file))
		goto out_unlock;

	sched_exec();

	bprm->file = file;
	bprm->filename = filename;
	bprm->interp = filename;

	retval = bprm_mm_init(bprm);
	if (retval)
		goto out_file;

	bprm->argc = count(argv, MAX_ARG_STRINGS);
	if ((retval = bprm->argc) < 0)
		goto out;

	bprm->envc = count(envp, MAX_ARG_STRINGS);
	if ((retval = bprm->envc) < 0)
		goto out;

	retval = prepare_binprm(bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings_kernel(1, &bprm->filename, bprm);
	if (retval < 0)
		goto out;

	bprm->exec = bprm->p;
	retval = copy_strings(bprm->envc, envp, bprm);
	if (retval < 0)
		goto out;

	retval = copy_strings(bprm->argc, argv, bprm);
	if (retval < 0)
		goto out;

	current->flags &= ~PF_KTHREAD;
	retval = search_binary_handler(bprm, regs);
	if (retval < 0)
		goto out;

	/* execve succeeded */
	mutex_unlock(&current->cred_exec_mutex);
	acct_update_integrals(current);
	free_bprm(bprm);
	if (displaced)
		put_files_struct(displaced);
	return retval;

out:
	if (bprm->mm)
		mmput(bprm->mm);

out_file:
	if (bprm->file) {
		allow_write_access(bprm->file);
		fput(bprm->file);
	}

out_unlock:
	mutex_unlock(&current->cred_exec_mutex);

out_free:
	free_bprm(bprm);

out_files:
	if (displaced)
		reset_files_struct(displaced);
out_ret:
	return retval;
}

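/*
 * Switch the current task to a new binary-format handler, taking a module
 * reference on the new handler and dropping the reference on the old one.
 */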
int set_binfmt(struct linux_binfmt *new)
{
	struct linux_binfmt *old = current->binfmt;

	if (new) {
		if (!try_module_get(new->module))
			return -1;
	}
	current->binfmt = new;
	if (old)
		module_put(old->module);
	return 0;
}

EXPORT_SYMBOL(set_binfmt);

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(char *corename, long signr)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	char *out_ptr = corename;
	char *const out_end = corename + CORENAME_MAX_SIZE;
	int rc;
	int pid_in_pattern = 0;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (out_ptr == out_end)
				goto out;
			*out_ptr++ = *pat_ptr++;
		} else {
			switch (*++pat_ptr) {
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				if (out_ptr == out_end)
					goto out;
				*out_ptr++ = '%';
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", task_tgid_vnr(current));
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* uid */
			case 'u':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->uid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* gid */
			case 'g':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%d", cred->gid);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* signal that caused the coredump */
			case 's':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%ld", signr);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", tv.tv_sec);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			}
			/* hostname */
			case 'h':
				down_read(&uts_sem);
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", utsname()->nodename);
				up_read(&uts_sem);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* executable */
			case 'e':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%s", current->comm);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			/* core limit size */
			case 'c':
				rc = snprintf(out_ptr, out_end - out_ptr,
					      "%lu", current->signal->rlim[RLIMIT_CORE].rlim_cur);
				if (rc > out_end - out_ptr)
					goto out;
				out_ptr += rc;
				break;
			default:
				break;
			}
			++pat_ptr;
		}
	}
	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		rc = snprintf(out_ptr, out_end - out_ptr,
			      ".%d", task_tgid_vnr(current));
		if (rc > out_end - out_ptr)
			goto out;
		out_ptr += rc;
	}
out:
	*out_ptr = 0;
	return ispipe;
}

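/*
 * Flag @start's thread group as exiting and SIGKILL every other thread that
 * still owns an mm; returns the number of threads signalled.
 */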
static int zap_process(struct task_struct *start)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
				struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		tsk->signal->group_exit_code = exit_code;
		nr = zap_process(tsk);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct completion *vfork_done;
	int core_waiters;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;
	core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (unlikely(core_waiters < 0))
		goto fail;

	/*
	 * Make sure nobody is waiting for us to release the VM,
	 * otherwise we can deadlock when we wait on each other
	 */
	vfork_done = tsk->vfork_done;
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}

	if (core_waiters)
		wait_for_completion(&core_state->startup);
fail:
	return core_waiters;
}

static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

/*
 * set_dumpable converts traditional three-value dumpable to two flags and
 * stores them into mm->flags. It modifies lower two bits of mm->flags, but
 * these bits are not changed atomically. So get_dumpable can observe the
 * intermediate state. To avoid unexpected behavior, get_dumpable returns
 * either the old dumpable or the new one by paying attention to the order
 * in which the bits are modified.
 *
 * dumpable |   mm->flags (binary)
 *  old  new | initial  interim  final
 * ----------+------------------------
 *   0    1  |   00       01       01
 *   0    2  |   00       10(*)    11
 *   1    0  |   01       00       00
 *   1    2  |   01       11       11
 *   2    0  |   11       10(*)    00
 *   2    1  |   11       11       01
 *
 * (*) get_dumpable regards interim value of 10 as 11.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
	switch (value) {
	case 0:
		clear_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 1:
		set_bit(MMF_DUMPABLE, &mm->flags);
		smp_wmb();
		clear_bit(MMF_DUMP_SECURELY, &mm->flags);
		break;
	case 2:
		set_bit(MMF_DUMP_SECURELY, &mm->flags);
		smp_wmb();
		set_bit(MMF_DUMPABLE, &mm->flags);
		break;
	}
}

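/*
 * Read back the traditional 0/1/2 dumpable value from the two mm->flags
 * bits; the interim bit pattern 10 is reported as 2 (see the table above).
 */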
int get_dumpable(struct mm_struct *mm)
{
	int ret;

	ret = mm->flags & 0x3;
	return (ret >= 2) ? 2 : ret;
}

void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
	struct core_state core_state;
	char corename[CORENAME_MAX_SIZE + 1];
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	struct inode * inode;
	struct file * file;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe = 0;
	unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	char **helper_argv = NULL;
	int helper_argc = 0;
	char *delimit;

	audit_core_dumps(signr);

	binfmt = current->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;

	cred = prepare_creds();
	if (!cred) {
		retval = -ENOMEM;
		goto fail;
	}

	down_write(&mm->mmap_sem);
	/*
	 * If another thread got here first, or we are not dumpable, bail out.
	 */
	if (mm->core_state || !get_dumpable(mm)) {
		up_write(&mm->mmap_sem);
		put_cred(cred);
		goto fail;
	}

	/*
	 * We cannot trust fsuid as being the "true" uid of the
	 * process nor do we know its entire history. We only know it
	 * was tainted so we dump it as root in mode 2.
	 */
	if (get_dumpable(mm) == 2) {	/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = 0;	/* Dump root private */
	}

	retval = coredump_wait(exit_code, &core_state);
	if (retval < 0) {
		put_cred(cred);
		goto fail;
	}

	old_cred = override_creds(cred);

	/*
	 * Clear any false indication of pending signals that might
	 * be seen by the filesystem code called to write the core file.
	 */
	clear_thread_flag(TIF_SIGPENDING);

	/*
	 * lock_kernel() because format_corename() is controlled by sysctl, which
	 * uses lock_kernel()
	 */
	lock_kernel();
	ispipe = format_corename(corename, signr);
	unlock_kernel();
	/*
	 * Don't bother to check the RLIMIT_CORE value if core_pattern points
	 * to a pipe.  Since we're not writing directly to the filesystem
	 * RLIMIT_CORE doesn't really apply, as no actual core file will be
	 * created unless the pipe reader chooses to write out the core file
	 * at which point file size limits and permissions will be imposed
	 * as it does with any other process
	 */
	if ((!ispipe) && (core_limit < binfmt->min_coredump))
		goto fail_unlock;

	if (ispipe) {
		helper_argv = argv_split(GFP_KERNEL, corename+1, &helper_argc);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_unlock;
		}
		/* Terminate the string before the first option */
		delimit = strchr(corename, ' ');
		if (delimit)
			*delimit = '\0';
		delimit = strrchr(helper_argv[0], '/');
		if (delimit)
			delimit++;
		else
			delimit = helper_argv[0];
		if (!strcmp(delimit, current->comm)) {
			printk(KERN_NOTICE "Recursive core dump detected, "
					"aborting\n");
			goto fail_unlock;
		}

		core_limit = RLIM_INFINITY;

		/* SIGPIPE can happen, but it's just never processed */
		if (call_usermodehelper_pipe(corename+1, helper_argv, NULL,
				&file)) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       corename);
			goto fail_unlock;
		}
	} else
		file = filp_open(corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
	if (IS_ERR(file))
		goto fail_unlock;
	inode = file->f_path.dentry->d_inode;
	if (inode->i_nlink > 1)
		goto close_fail;	/* multiple links - don't dump */
	if (!ispipe && d_unhashed(file->f_path.dentry))
		goto close_fail;

AK
1830 /* AK: actually i see no reason to not allow this for named pipes etc.,
1831 but keep the previous behaviour for now. */
1832 if (!ispipe && !S_ISREG(inode->i_mode))
1da177e4 1833 goto close_fail;
c46f739d
IM
1834 /*
1835 * Dont allow local users get cute and trick others to coredump
1836 * into their pre-created files:
1837 */
da9592ed 1838 if (inode->i_uid != current_fsuid())
c46f739d 1839 goto close_fail;
1da177e4
LT
1840 if (!file->f_op)
1841 goto close_fail;
1842 if (!file->f_op->write)
1843 goto close_fail;
0f7fc9e4 1844 if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
1da177e4
LT
1845 goto close_fail;
1846
7dc0b22e 1847 retval = binfmt->core_dump(signr, regs, file, core_limit);
1da177e4
LT
1848
1849 if (retval)
1850 current->signal->group_exit_code |= 0x80;
1851close_fail:
1852 filp_close(file, NULL);
1853fail_unlock:
74aadce9
NH
1854 if (helper_argv)
1855 argv_free(helper_argv);
1856
d84f4f99
DH
1857 revert_creds(old_cred);
1858 put_cred(cred);
a94e2d40 1859 coredump_finish(mm);
1da177e4 1860fail:
8cd3ac3a 1861 return;
1da177e4 1862}