Task Control Groups: add procfs interface
[linux-2.6-block.git] / kernel / fork.c
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/mnt_namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);	/* outer */

int nr_processes(void)
{
        int cpu;
        int total = 0;

        for_each_online_cpu(cpu)
                total += per_cpu(process_counts, cpu);

        return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static struct kmem_cache *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

void free_task(struct task_struct *tsk)
{
        prop_local_destroy_single(&tsk->dirties);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
        WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);
        delayacct_tsk_free(tsk);

        if (!profile_handoff_task(tsk))
                free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
                kmem_cache_create("task_struct", sizeof(struct task_struct),
                        ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
#endif

        /*
         * The default maximum number of threads is set to a safe
         * value: the thread structures can take up at most half
         * of memory.
         */
        max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

        /*
         * we need to allow at least 20 threads to boot a system
         */
        if (max_threads < 20)
                max_threads = 20;

        init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
        init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
}
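
/*
 * Editor's worked example (illustrative, not part of the original
 * source): assuming 4 KiB pages and 8 KiB kernel stacks,
 * THREAD_SIZE / PAGE_SIZE == 2, so the formula above gives
 *
 *	max_threads = mempages / 16
 *
 * Each thread then pins THREAD_SIZE == 2 pages of stack, so thread
 * stacks can consume at most mempages / 8 pages in total --
 * comfortably under the "half of memory" bound the comment promises.
 */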

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;
        struct thread_info *ti;
        int err;

        prepare_to_copy(orig);

        tsk = alloc_task_struct();
        if (!tsk)
                return NULL;

        ti = alloc_thread_info(tsk);
        if (!ti) {
                free_task_struct(tsk);
                return NULL;
        }

        *tsk = *orig;
        tsk->stack = ti;

        err = prop_local_init_single(&tsk->dirties);
        if (err) {
                free_thread_info(ti);
                free_task_struct(tsk);
                return NULL;
        }

        setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
#endif

        /* One for us, one for whoever does the "release_task()" (usually parent) */
        atomic_set(&tsk->usage, 2);
        atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
#endif
        tsk->splice_pipe = NULL;
        return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
        struct vm_area_struct *mpnt, *tmp, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
        struct mempolicy *pol;

        down_write(&oldmm->mmap_sem);
        flush_cache_dup_mm(oldmm);
        /*
         * Not linked in yet - no deadlock potential:
         */
        down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

        mm->locked_vm = 0;
        mm->mmap = NULL;
        mm->mmap_cache = NULL;
        mm->free_area_cache = oldmm->mmap_base;
        mm->cached_hole_size = ~0UL;
        mm->map_count = 0;
        cpus_clear(mm->cpu_vm_mask);
        mm->mm_rb = RB_ROOT;
        rb_link = &mm->mm_rb.rb_node;
        rb_parent = NULL;
        pprev = &mm->mmap;

        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;

                if (mpnt->vm_flags & VM_DONTCOPY) {
                        long pages = vma_pages(mpnt);
                        mm->total_vm -= pages;
                        vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                                                                -pages);
                        continue;
                }
                charge = 0;
                if (mpnt->vm_flags & VM_ACCOUNT) {
                        unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
                        if (security_vm_enough_memory(len))
                                goto fail_nomem;
                        charge = len;
                }
                tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
                pol = mpol_copy(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
                vma_set_policy(tmp, pol);
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
                tmp->vm_next = NULL;
                anon_vma_link(tmp);
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        get_file(file);
                        if (tmp->vm_flags & VM_DENYWRITE)
                                atomic_dec(&inode->i_writecount);

                        /* insert tmp into the share list, just after mpnt */
                        spin_lock(&file->f_mapping->i_mmap_lock);
                        tmp->vm_truncate_count = mpnt->vm_truncate_count;
                        flush_dcache_mmap_lock(file->f_mapping);
                        vma_prio_tree_add(tmp, mpnt);
                        flush_dcache_mmap_unlock(file->f_mapping);
                        spin_unlock(&file->f_mapping->i_mmap_lock);
                }

                /*
                 * Link in the new vma and copy the page table entries.
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;

                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
                rb_parent = &tmp->vm_rb;

                mm->map_count++;
                retval = copy_page_range(mm, oldmm, mpnt);

                if (tmp->vm_ops && tmp->vm_ops->open)
                        tmp->vm_ops->open(tmp);

                if (retval)
                        goto out;
        }
        /* a new mm has just been created */
        arch_dup_mmap(oldmm, mm);
        retval = 0;
out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
        up_write(&oldmm->mmap_sem);
        return retval;
fail_nomem_policy:
        kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
        goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
        mm->pgd = pgd_alloc(mm);
        if (unlikely(!mm->pgd))
                return -ENOMEM;
        return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
        pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm)
{
        atomic_set(&mm->mm_users, 1);
        atomic_set(&mm->mm_count, 1);
        init_rwsem(&mm->mmap_sem);
        INIT_LIST_HEAD(&mm->mmlist);
        mm->flags = (current->mm) ? current->mm->flags
                                  : MMF_DUMP_FILTER_DEFAULT;
        mm->core_waiters = 0;
        mm->nr_ptes = 0;
        set_mm_counter(mm, file_rss, 0);
        set_mm_counter(mm, anon_rss, 0);
        spin_lock_init(&mm->page_table_lock);
        rwlock_init(&mm->ioctx_list_lock);
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;

        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }
        free_mm(mm);
        return NULL;
}
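
/*
 * Editor's note (illustrative, not part of the original source): the
 * two counters initialized above have distinct lifetimes.  mm_users
 * counts real users of the address space and is dropped with mmput();
 * mm_count counts "anonymous" references such as lazy-TLB kernel
 * threads and is dropped with mmdrop().  __mmdrop() below runs only
 * when the last mm_count reference goes away:
 *
 *	atomic_inc(&mm->mm_count);	// lazy-TLB style reference
 *	...
 *	mmdrop(mm);			// may invoke __mmdrop()
 */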

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
        struct mm_struct * mm;

        mm = allocate_mm();
        if (mm) {
                memset(mm, 0, sizeof(*mm));
                mm = mm_init(mm);
        }
        return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
        free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
        might_sleep();

        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
                mmdrop(mm);
        }
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has transiently adopted a user mm
 * with use_mm, to do its AIO).  Otherwise returns a reference to the
 * mm, after bumping up its use count.  The caller must release the
 * mm via mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
        struct mm_struct *mm;

        task_lock(task);
        mm = task->mm;
        if (mm) {
                if (task->flags & PF_BORROWED_MM)
                        mm = NULL;
                else
                        atomic_inc(&mm->mm_users);
        }
        task_unlock(task);
        return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
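
/*
 * Editor's sketch (illustrative, not part of the original file): the
 * canonical caller pattern for the API above, as a /proc or ptrace
 * handler would use it.  get_task_mm() takes its own mm_users
 * reference, so the mm stays valid even if the task exits, until the
 * matching mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		// inspect mm->total_vm, walk VMAs under mmap_sem, ...
 *		mmput(mm);
 *	}
 */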

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or on success.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
        struct completion *vfork_done = tsk->vfork_done;

        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);

        /* notify parent sleeping on vfork() */
        if (vfork_done) {
                tsk->vfork_done = NULL;
                complete(vfork_done);
        }

        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
         * trouble otherwise.  Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid
            && !(tsk->flags & PF_SIGNALED)
            && atomic_read(&mm->mm_users) > 1) {
                u32 __user * tidptr = tsk->clear_child_tid;
                tsk->clear_child_tid = NULL;

                /*
                 * We don't check the error code - if userspace has
                 * not set up a proper pointer then tough luck.
                 */
                put_user(0, tidptr);
                sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
        }
}
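
/*
 * Editor's sketch (illustrative, not part of the original file): the
 * clear_child_tid dance above is the kernel half of pthread_join().
 * A threading library registers a tid slot via CLONE_CHILD_CLEARTID
 * (or sys_set_tid_address), and the joiner sleeps on it as a futex;
 * on exit the kernel stores 0 and does the FUTEX_WAKE seen above:
 *
 *	// joiner, in userspace (sketch):
 *	while (*tidptr != 0)
 *		syscall(SYS_futex, tidptr, FUTEX_WAIT, *tidptr, NULL);
 */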

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
        struct mm_struct *mm, *oldmm = current->mm;
        int err;

        if (!oldmm)
                return NULL;

        mm = allocate_mm();
        if (!mm)
                goto fail_nomem;

        memcpy(mm, oldmm, sizeof(*mm));

        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        if (!mm_init(mm))
                goto fail_nomem;

        if (init_new_context(tsk, mm))
                goto fail_nocontext;

        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;

        mm->hiwater_rss = get_mm_rss(mm);
        mm->hiwater_vm = mm->total_vm;

        return mm;

free_pt:
        mmput(mm);

fail_nomem:
        return NULL;

fail_nocontext:
        /*
         * If init_new_context() failed, we cannot use mmput() to free the mm
         * because it calls destroy_context()
         */
        mm_free_pgd(mm);
        free_mm(mm);
        return NULL;
}

static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
        struct mm_struct * mm, *oldmm;
        int retval;

        tsk->min_flt = tsk->maj_flt = 0;
        tsk->nvcsw = tsk->nivcsw = 0;

        tsk->mm = NULL;
        tsk->active_mm = NULL;

        /*
         * Are we cloning a kernel thread?
         *
         * We need to steal an active VM for that..
         */
        oldmm = current->mm;
        if (!oldmm)
                return 0;

        if (clone_flags & CLONE_VM) {
                atomic_inc(&oldmm->mm_users);
                mm = oldmm;
                goto good_mm;
        }

        retval = -ENOMEM;
        mm = dup_mm(tsk);
        if (!mm)
                goto fail_nomem;

good_mm:
        /* Initializing for Swap token stuff */
        mm->token_priority = 0;
        mm->last_interval = 0;

        tsk->mm = mm;
        tsk->active_mm = mm;
        return 0;

fail_nomem:
        return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* We don't need to lock fs - think why ;-) */
        if (fs) {
                atomic_set(&fs->count, 1);
                rwlock_init(&fs->lock);
                fs->umask = old->umask;
                read_lock(&old->lock);
                fs->rootmnt = mntget(old->rootmnt);
                fs->root = dget(old->root);
                fs->pwdmnt = mntget(old->pwdmnt);
                fs->pwd = dget(old->pwd);
                if (old->altroot) {
                        fs->altrootmnt = mntget(old->altrootmnt);
                        fs->altroot = dget(old->altroot);
                } else {
                        fs->altrootmnt = NULL;
                        fs->altroot = NULL;
                }
                read_unlock(&old->lock);
        }
        return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
        if (clone_flags & CLONE_FS) {
                atomic_inc(&current->fs->count);
                return 0;
        }
        tsk->fs = __copy_fs_struct(current->fs);
        if (!tsk->fs)
                return -ENOMEM;
        return 0;
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size/(8*sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i+1) * 8 * sizeof(long);
        return i;
}
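
/*
 * Editor's worked example (not part of the original source): on a
 * 64-bit box, 8 * sizeof(long) == 64 fds per bitmap word.  With
 * max_fds == 256 and fd 70 as the highest open descriptor, the scan
 * stops at bitmap word 1 (fds 64..127) and returns (1 + 1) * 64 ==
 * 128: the open-fd count rounded up to a whole bitmap word, which is
 * what the open_files/8-byte memcpy()s in dup_fd() below rely on.
 */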

static struct files_struct *alloc_files(void)
{
        struct files_struct *newf;
        struct fdtable *fdt;

        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        fdt = &newf->fdtab;
        fdt->max_fds = NR_OPEN_DEFAULT;
        fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        fdt->open_fds = (fd_set *)&newf->open_fds_init;
        fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->next = NULL;
        rcu_assign_pointer(newf->fdt, fdt);
out:
        return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = alloc_files();
        if (!newf)
                goto out;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        new_fdt = files_fdtable(newf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         * Note: we're not a clone task, so the open count won't change.
         */
        if (open_files > new_fdt->max_fds) {
                new_fdt->max_fds = 0;
                spin_unlock(&oldf->file_lock);
                spin_lock(&newf->file_lock);
                *errorp = expand_files(newf, open_files-1);
                spin_unlock(&newf->file_lock);
                if (*errorp < 0)
                        goto out_release;
                new_fdt = files_fdtable(newf);
                /*
                 * Reacquire the oldf lock and a pointer to its fd table;
                 * it may have grown a new, bigger fd table in the
                 * meantime, and we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits,
                old_fdt->open_fds->fds_bits, open_files/8);
        memcpy(new_fdt->close_on_exec->fds_bits,
                old_fdt->close_on_exec->fds_bits, open_files/8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open().  So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long-word aligned, thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds-open_files)/8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}

static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
        struct files_struct *oldf, *newf;
        int error = 0;

        /*
         * A background process may not have any files ...
         */
        oldf = current->files;
        if (!oldf)
                goto out;

        if (clone_flags & CLONE_FILES) {
                atomic_inc(&oldf->count);
                goto out;
        }

        /*
         * Note: we may be using current for both targets (See exec.c)
         * This works because we cache current->files (old) as oldf. Don't
         * break this.
         */
        tsk->files = NULL;
        newf = dup_fd(oldf, &error);
        if (!newf)
                goto out;

        tsk->files = newf;
        error = 0;
out:
        return error;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
        struct files_struct *files = current->files;
        int rc;

        BUG_ON(!files);

        /* This can race but the race causes us to copy when we don't
           need to and drop the copy */
        if (atomic_read(&files->count) == 1) {
                atomic_inc(&files->count);
                return 0;
        }
        rc = copy_files(0, current);
        if (rc)
                current->files = files;
        return rc;
}

EXPORT_SYMBOL(unshare_files);

static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
        struct sighand_struct *sig;

        if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
                atomic_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        rcu_assign_pointer(tsk->sighand, sig);
        if (!sig)
                return -ENOMEM;
        atomic_set(&sig->count, 1);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
        if (atomic_dec_and_test(&sighand->count))
                kmem_cache_free(sighand_cachep, sighand);
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
        struct signal_struct *sig;
        int ret;

        if (clone_flags & CLONE_THREAD) {
                atomic_inc(&current->signal->count);
                atomic_inc(&current->signal->live);
                return 0;
        }
        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
        tsk->signal = sig;
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
        sig->flags = 0;
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
        sig->curr_target = NULL;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);

        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
        sig->tsk = tsk;

        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
        sig->it_prof_expires = cputime_zero;
        sig->it_prof_incr = cputime_zero;

        sig->leader = 0;	/* session leadership doesn't inherit */
        sig->tty_old_pgrp = NULL;

        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
        sig->gtime = cputime_zero;
        sig->cgtime = cputime_zero;
        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
        sig->sum_sched_runtime = 0;
        INIT_LIST_HEAD(&sig->cpu_timers[0]);
        INIT_LIST_HEAD(&sig->cpu_timers[1]);
        INIT_LIST_HEAD(&sig->cpu_timers[2]);
        taskstats_tgid_init(sig);

        task_lock(current->group_leader);
        memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
        task_unlock(current->group_leader);

        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                /*
                 * New sole thread in the process gets an expiry time
                 * of the whole CPU time limit.
                 */
                tsk->it_prof_expires =
                        secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
        }
        acct_init_pacct(&sig->pacct);

        tty_audit_fork(sig);

        return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
        exit_thread_group_keys(sig);
        kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;

        atomic_dec(&sig->live);

        if (atomic_dec_and_test(&sig->count))
                __cleanup_signal(sig);
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
        unsigned long new_flags = p->flags;

        new_flags &= ~PF_SUPERPRIV;
        new_flags |= PF_FORKNOEXEC;
        if (!(clone_flags & CLONE_PTRACE))
                p->ptrace = 0;
        p->flags = new_flags;
        clear_freeze_flag(p);
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
        current->clear_child_tid = tidptr;

        return current->pid;
}

static inline void rt_mutex_init_task(struct task_struct *p)
{
        spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
        plist_head_init(&p->pi_waiters, &p->pi_lock);
        p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags).  The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *parent_tidptr,
                                        int __user *child_tidptr,
                                        struct pid *pid)
{
        int retval;
        struct task_struct *p = NULL;
        int cgroup_callbacks_done = 0;

        if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
                return ERR_PTR(-EINVAL);

        /*
         * Thread groups must share signals as well, and detached threads
         * can only be started up within the thread group.
         */
        if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
                return ERR_PTR(-EINVAL);

        /*
         * Shared signal handlers imply shared VM. By way of the above,
         * thread groups also imply shared VM. Blocking this case allows
         * for various simplifications in other code.
         */
        if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
                return ERR_PTR(-EINVAL);

        retval = security_task_create(clone_flags);
        if (retval)
                goto fork_out;

        retval = -ENOMEM;
        p = dup_task_struct(current);
        if (!p)
                goto fork_out;

        rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                    p->user != current->nsproxy->user_ns->root_user)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);

        /*
         * If multiple threads are within copy_process(), then this check
         * triggers too late. This doesn't hurt, the check is only there
         * to stop root fork bombs.
         */
        if (nr_threads >= max_threads)
                goto bad_fork_cleanup_count;

        if (!try_module_get(task_thread_info(p)->exec_domain->module))
                goto bad_fork_cleanup_count;

        if (p->binfmt && !try_module_get(p->binfmt->module))
                goto bad_fork_cleanup_put_domain;

        p->did_exec = 0;
        delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
        copy_flags(clone_flags, p);
        p->pid = pid_nr(pid);
        retval = -EFAULT;
        if (clone_flags & CLONE_PARENT_SETTID)
                if (put_user(p->pid, parent_tidptr))
                        goto bad_fork_cleanup_delays_binfmt;

        INIT_LIST_HEAD(&p->children);
        INIT_LIST_HEAD(&p->sibling);
        p->vfork_done = NULL;
        spin_lock_init(&p->alloc_lock);

        clear_tsk_thread_flag(p, TIF_SIGPENDING);
        init_sigpending(&p->pending);

        p->utime = cputime_zero;
        p->stime = cputime_zero;
        p->gtime = cputime_zero;
        p->utimescaled = cputime_zero;
        p->stimescaled = cputime_zero;

#ifdef CONFIG_TASK_XACCT
        p->rchar = 0;		/* I/O counter: bytes read */
        p->wchar = 0;		/* I/O counter: bytes written */
        p->syscr = 0;		/* I/O counter: read syscalls */
        p->syscw = 0;		/* I/O counter: write syscalls */
#endif
        task_io_accounting_init(p);
        acct_clear_integrals(p);

        p->it_virt_expires = cputime_zero;
        p->it_prof_expires = cputime_zero;
        p->it_sched_expires = 0;
        INIT_LIST_HEAD(&p->cpu_timers[0]);
        INIT_LIST_HEAD(&p->cpu_timers[1]);
        INIT_LIST_HEAD(&p->cpu_timers[2]);

        p->lock_depth = -1;		/* -1 = no lock */
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->real_start_time = p->start_time;
        monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
        p->security = NULL;
#endif
        p->io_context = NULL;
        p->audit_context = NULL;
        cpuset_fork(p);
        cgroup_fork(p);
#ifdef CONFIG_NUMA
        p->mempolicy = mpol_copy(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
                goto bad_fork_cleanup_cgroup;
        }
        mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        p->hardirqs_enabled = 1;
#else
        p->hardirqs_enabled = 0;
#endif
        p->hardirq_enable_ip = 0;
        p->hardirq_enable_event = 0;
        p->hardirq_disable_ip = _THIS_IP_;
        p->hardirq_disable_event = 0;
        p->softirqs_enabled = 1;
        p->softirq_enable_ip = _THIS_IP_;
        p->softirq_enable_event = 0;
        p->softirq_disable_ip = 0;
        p->softirq_disable_event = 0;
        p->hardirq_context = 0;
        p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
        p->lockdep_depth = 0; /* no locks held yet */
        p->curr_chain_key = 0;
        p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
#endif

        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
        if ((retval = copy_files(clone_flags, p)))
                goto bad_fork_cleanup_semundo;
        if ((retval = copy_fs(clone_flags, p)))
                goto bad_fork_cleanup_files;
        if ((retval = copy_sighand(clone_flags, p)))
                goto bad_fork_cleanup_fs;
        if ((retval = copy_signal(clone_flags, p)))
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_namespaces;

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
         */
        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_FUTEX
        p->robust_list = NULL;
#ifdef CONFIG_COMPAT
        p->compat_robust_list = NULL;
#endif
        INIT_LIST_HEAD(&p->pi_state_list);
        p->pi_state_cache = NULL;
#endif
        /*
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
                p->sas_ss_sp = p->sas_ss_size = 0;

        /*
         * Syscall tracing should be turned off in the child regardless
         * of CLONE_PTRACE.
         */
        clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

        /* Our parent execution domain becomes our current domain.
           These must match for thread signalling to apply. */
        p->parent_exec_id = p->self_exec_id;

        /* ok, now we should be set up.. */
        p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
        p->pdeath_signal = 0;
        p->exit_state = 0;

        /*
         * Ok, make it visible to the rest of the system.
         * We don't wake it up yet.
         */
        p->group_leader = p;
        INIT_LIST_HEAD(&p->thread_group);
        INIT_LIST_HEAD(&p->ptrace_children);
        INIT_LIST_HEAD(&p->ptrace_list);

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        /* Now that the task is set up, run cgroup callbacks if
         * necessary. We need to run them before the task is visible
         * on the tasklist. */
        cgroup_fork_callbacks(p);
        cgroup_callbacks_done = 1;

        /* Need tasklist lock for parent etc handling! */
        write_lock_irq(&tasklist_lock);

        /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
        p->ioprio = current->ioprio;

        /*
         * The task hasn't been attached yet, so its cpus_allowed mask will
         * not be changed, nor will its assigned CPU.
         *
         * The cpus_allowed mask of the parent may have changed after it was
         * copied first time - so re-copy it here, then check the child's CPU
         * to ensure it is on a valid CPU (and if not, just force it back to
         * parent's CPU). This avoids a lot of nasty races.
         */
        p->cpus_allowed = current->cpus_allowed;
        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                        !cpu_online(task_cpu(p))))
                set_task_cpu(p, smp_processor_id());

        /* CLONE_PARENT re-uses the old parent */
        if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
                p->real_parent = current->real_parent;
        else
                p->real_parent = current;
        p->parent = p->real_parent;

        spin_lock(&current->sighand->siglock);

        /*
         * Process group and session signals need to be delivered to just the
         * parent before the fork or both the parent and the child after the
         * fork. Restart if a signal comes in before we add the new process to
         * its process group.
         * A fatal signal pending means that current will exit, so the new
         * thread can't slip out of an OOM kill (or normal SIGKILL).
         */
        recalc_sigpending();
        if (signal_pending(current)) {
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_cleanup_namespaces;
        }

        if (clone_flags & CLONE_THREAD) {
                p->group_leader = current->group_leader;
                list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

                if (!cputime_eq(current->signal->it_virt_expires,
                                cputime_zero) ||
                    !cputime_eq(current->signal->it_prof_expires,
                                cputime_zero) ||
                    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
                    !list_empty(&current->signal->cpu_timers[0]) ||
                    !list_empty(&current->signal->cpu_timers[1]) ||
                    !list_empty(&current->signal->cpu_timers[2])) {
                        /*
                         * Have child wake up on its first tick to check
                         * for process CPU timers.
                         */
                        p->it_prof_expires = jiffies_to_cputime(1);
                }
        }

        if (likely(p->pid)) {
                add_parent(p);
                if (unlikely(p->ptrace & PT_PTRACED))
                        __ptrace_link(p, current->parent);

                if (thread_group_leader(p)) {
                        p->signal->tty = current->signal->tty;
                        p->signal->pgrp = process_group(current);
                        set_signal_session(p->signal, process_session(current));
                        attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        attach_pid(p, PIDTYPE_SID, task_session(current));

                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
                        __get_cpu_var(process_counts)++;
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
        }

        total_forks++;
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        return p;

bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
bad_fork_cleanup_signal:
        cleanup_signal(p);
bad_fork_cleanup_sighand:
        __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
        exit_fs(p); /* blocking */
bad_fork_cleanup_files:
        exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_free(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
        cpuset_exit(p);
        cgroup_exit(p, cgroup_callbacks_done);
bad_fork_cleanup_delays_binfmt:
        delayacct_tsk_free(p);
        if (p->binfmt)
                module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
bad_fork_free:
        free_task(p);
fork_out:
        return ERR_PTR(retval);
}
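
/*
 * Editor's note (not part of the original source): the bad_fork_*
 * ladder above unwinds in exactly the reverse order of the copy_*
 * calls, so a failure at any step frees precisely what was already
 * set up -- the standard kernel goto-cleanup idiom:
 *
 *	if ((retval = copy_files(clone_flags, p)))
 *		goto bad_fork_cleanup_semundo;	// undo semundo and earlier
 */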

noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        return regs;
}

struct task_struct * __cpuinit fork_idle(int cpu)
{
        struct task_struct *task;
        struct pt_regs regs;

        task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL,
                                &init_struct_pid);
        if (!IS_ERR(task))
                init_idle(task, cpu);

        return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;
        else if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK)
                return PTRACE_EVENT_FORK;

        return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
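
/*
 * Editor's note (illustrative, not part of the original file): the
 * arch syscall stubs reduce the classic process-creation calls to
 * do_fork() flag combinations along these lines (i386-style):
 *
 *	fork()  -> do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
 *	vfork() -> do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *			   regs.esp, &regs, 0, NULL, NULL);
 *	clone() -> do_fork(clone_flags, newsp, &regs, 0,
 *			   parent_tidptr, child_tidptr);
 */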
long do_fork(unsigned long clone_flags,
              unsigned long stack_start,
              struct pt_regs *regs,
              unsigned long stack_size,
              int __user *parent_tidptr,
              int __user *child_tidptr)
{
        struct task_struct *p;
        int trace = 0;
        struct pid *pid = alloc_pid();
        long nr;

        if (!pid)
                return -EAGAIN;
        nr = pid->nr;
        if (unlikely(current->ptrace)) {
                trace = fork_traceflag(clone_flags);
                if (trace)
                        clone_flags |= CLONE_PTRACE;
        }

        p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
        /*
         * Do this prior to waking up the new thread - the thread pointer
         * might get invalid after that point, if the thread exits quickly.
         */
        if (!IS_ERR(p)) {
                struct completion vfork;

                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
                }

                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
                        /*
                         * We'll start up with an immediate SIGSTOP.
                         */
                        sigaddset(&p->pending.signal, SIGSTOP);
                        set_tsk_thread_flag(p, TIF_SIGPENDING);
                }

                if (!(clone_flags & CLONE_STOPPED))
                        wake_up_new_task(p, clone_flags);
                else
                        p->state = TASK_STOPPED;

                if (unlikely(trace)) {
                        current->ptrace_message = nr;
                        ptrace_notify((trace << 8) | SIGTRAP);
                }

                if (clone_flags & CLONE_VFORK) {
                        freezer_do_not_count();
                        wait_for_completion(&vfork);
                        freezer_count();
                        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
                                current->ptrace_message = nr;
                                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
                        }
                }
        } else {
                free_pid(pid);
                nr = PTR_ERR(p);
        }
        return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(struct kmem_cache *cachep, void *data)
{
        struct sighand_struct *sighand = data;

        spin_lock_init(&sighand->siglock);
        init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init proc_caches_init(void)
{
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
                        sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        vm_area_cachep = kmem_cache_create("vm_area_struct",
                        sizeof(struct vm_area_struct), 0,
                        SLAB_PANIC, NULL);
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}

/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
        /*
         * If unsharing a thread from a thread group, must also
         * unshare vm.
         */
        if (*flags_ptr & CLONE_THREAD)
                *flags_ptr |= CLONE_VM;

        /*
         * If unsharing vm, must also unshare signal handlers.
         */
        if (*flags_ptr & CLONE_VM)
                *flags_ptr |= CLONE_SIGHAND;

        /*
         * If unsharing signal handlers and the task was created
         * using CLONE_THREAD, then must unshare the thread.
         */
        if ((*flags_ptr & CLONE_SIGHAND) &&
            (atomic_read(&current->signal->count) > 1))
                *flags_ptr |= CLONE_THREAD;

        /*
         * If unsharing namespace, must also unshare filesystem information.
         */
        if (*flags_ptr & CLONE_NEWNS)
                *flags_ptr |= CLONE_FS;
}
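
/*
 * Editor's worked example (not part of the original source): for a
 * task whose signal_struct is shared (count > 1), unshare(CLONE_VM)
 * is coerced step by step by the checks above:
 *
 *	CLONE_VM
 *	  -> CLONE_VM | CLONE_SIGHAND	(shared vm implies sighand)
 *	  -> ...      | CLONE_THREAD	(sighand shared by a thread)
 *
 * and then fails with -EINVAL in unshare_thread() below, since
 * thread unsharing is not supported yet.
 */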

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
        if (unshare_flags & CLONE_THREAD)
                return -EINVAL;

        return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
        struct fs_struct *fs = current->fs;

        if ((unshare_flags & CLONE_FS) &&
            (fs && atomic_read(&fs->count) > 1)) {
                *new_fsp = __copy_fs_struct(current->fs);
                if (!*new_fsp)
                        return -ENOMEM;
        }

        return 0;
}

/*
 * Unsharing of sighand is not supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
        struct sighand_struct *sigh = current->sighand;

        if ((unshare_flags & CLONE_SIGHAND) && atomic_read(&sigh->count) > 1)
                return -EINVAL;
        else
                return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
        struct mm_struct *mm = current->mm;

        if ((unshare_flags & CLONE_VM) &&
            (mm && atomic_read(&mm->mm_users) > 1)) {
                return -EINVAL;
        }

        return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
        struct files_struct *fd = current->files;
        int error = 0;

        if ((unshare_flags & CLONE_FILES) &&
            (fd && atomic_read(&fd->count) > 1)) {
                *new_fdp = dup_fd(fd, &error);
                if (!*new_fdp)
                        return error;
        }

        return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
        if (unshare_flags & CLONE_SYSVSEM)
                return -EINVAL;

        return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
        int err = 0;
        struct fs_struct *fs, *new_fs = NULL;
        struct sighand_struct *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
        struct sem_undo_list *new_ulist = NULL;
        struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;

        check_unshare_flags(&unshare_flags);

        /* Return -EINVAL for all unsupported flags */
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                                CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
                                CLONE_NEWNET))
                goto bad_unshare_out;

        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
                goto bad_unshare_cleanup_thread;
        if ((err = unshare_sighand(unshare_flags, &new_sigh)))
                goto bad_unshare_cleanup_fs;
        if ((err = unshare_vm(unshare_flags, &new_mm)))
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
        if ((err = unshare_semundo(unshare_flags, &new_ulist)))
                goto bad_unshare_cleanup_fd;
        if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
                        new_fs)))
                goto bad_unshare_cleanup_semundo;

        if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) {

                task_lock(current);

                if (new_nsproxy) {
                        old_nsproxy = current->nsproxy;
                        current->nsproxy = new_nsproxy;
                        new_nsproxy = old_nsproxy;
                }

                if (new_fs) {
                        fs = current->fs;
                        current->fs = new_fs;
                        new_fs = fs;
                }

                if (new_mm) {
                        mm = current->mm;
                        active_mm = current->active_mm;
                        current->mm = new_mm;
                        current->active_mm = new_mm;
                        activate_mm(active_mm, new_mm);
                        new_mm = mm;
                }

                if (new_fd) {
                        fd = current->files;
                        current->files = new_fd;
                        new_fd = fd;
                }

                task_unlock(current);
        }

        if (new_nsproxy)
                put_nsproxy(new_nsproxy);

bad_unshare_cleanup_semundo:
bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);

bad_unshare_cleanup_vm:
        if (new_mm)
                mmput(new_mm);

bad_unshare_cleanup_sigh:
        if (new_sigh)
                if (atomic_dec_and_test(&new_sigh->count))
                        kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
        if (new_fs)
                put_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
        return err;
}
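
/*
 * Editor's sketch (illustrative, not part of the original file): the
 * common userspace use of this syscall is to detach from a shared
 * mount namespace before doing private mounts:
 *
 *	if (unshare(CLONE_NEWNS) == -1)
 *		perror("unshare");
 *	// note that check_unshare_flags() quietly added CLONE_FS,
 *	// so current->fs was unshared too if it was shared.
 */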