/*
 * linux/kernel/fork.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/cn_proc.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

EXPORT_SYMBOL(tasklist_lock);

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}
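
/*
 * Illustrative note (not part of the original file): process_counts is
 * a per-CPU counter, so writers on different CPUs never contend; only
 * a reader like nr_processes() above walks all online CPUs.  The
 * update side, as used later in copy_process(), is simply:
 *
 *	__get_cpu_var(process_counts)++;	// touches this CPU's slot only
 *
 * The sum read here can therefore be slightly stale, which is fine for
 * a statistic.
 */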

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	if (unlikely(tsk->audit_context))
		audit_free(tsk);
	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: with this limit, the per-thread kernel stacks
	 * (THREAD_SIZE each) can take up at most one eighth of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if (max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}
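
/*
 * Worked example (illustrative, assuming 4 KiB pages and a two-page,
 * 8 KiB THREAD_SIZE): on a 128 MiB machine, mempages = 32768, so
 *
 *	max_threads = 32768 / (8 * 8192 / 4096) = 32768 / 16 = 2048
 *
 * and 2048 threads * 8 KiB of kernel stack = 16 MiB, one eighth of
 * 128 MiB, matching the comment above.
 */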

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*ti = *orig->thread_info;
	*tsk = *orig;
	tsk->thread_info = ti;
	ti->task = tsk;

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage, 2);
	atomic_set(&tsk->fs_excl, 0);
	return tsk;
}

#ifdef CONFIG_MMU
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(oldmm);
	down_write(&mm->mmap_sem);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, tmp);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct *mm_init(struct mm_struct *mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);
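
/*
 * Illustrative note (not part of the original file): an mm is guarded
 * by two counters.  mm_users counts users of the address space proper
 * (threads, get_task_mm() callers); when it hits zero, mmput() above
 * tears the mappings down.  mm_count counts references that merely
 * keep the struct alive, such as lazy-TLB kernel threads; only when it
 * also drops to zero does mmdrop() end up in __mmdrop() to free the
 * page directory and the mm itself.  A lazy-TLB user therefore does
 * roughly:
 *
 *	atomic_inc(&mm->mm_count);	// pin the struct, not the mappings
 *	...
 *	mmdrop(mm);			// may call __mmdrop()
 */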

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm, or if PF_BORROWED_MM is set
 * (meaning this kernel workthread has transiently adopted a user mm
 * with use_mm, to do its AIO).  Otherwise bumps the use count and
 * returns a reference to the mm.  The caller must release the mm via
 * mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
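
/*
 * Typical usage (illustrative sketch): a /proc handler that must not
 * work under task_lock() grabs a stable reference first:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (!mm)
 *		return -EINVAL;		// kernel thread, or task is exiting
 *	down_read(&mm->mmap_sem);
 *	... walk mm->mmap ...
 *	up_read(&mm->mmap_sem);
 *	mmput(mm);
 */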

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * on error and on success alike.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling: when we
 * only half set up a mm_struct for a new process, we must mmput
 * the new mm_struct before we can restore the old one.
 *		Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user *tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
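
/*
 * Userspace view of clear_child_tid (illustrative sketch, argument
 * order per the glibc clone() wrapper): a thread library passes
 * CLONE_CHILD_CLEARTID so that the code above zeroes the TID word and
 * wakes one futex waiter when the thread's mm goes away -- which is
 * what makes a join-style wait possible:
 *
 *	pid_t tid = clone(fn, stack,
 *			  CLONE_VM | CLONE_CHILD_CLEARTID | ...,
 *			  arg, NULL, NULL, &tidword);	// tidword == tid
 *	while (tidword != 0)				// the joiner
 *		syscall(SYS_futex, &tidword, FUTEX_WAIT, tid, NULL);
 */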

static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	retval = dup_mmap(mm, oldmm);
	if (retval)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return retval;
}
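
/*
 * Illustrative note (not part of the original file): the CLONE_VM
 * short-cut above is what makes a thread a thread.  With CLONE_VM the
 * child gets the very same mm_struct (mm_users bumped); without it,
 * dup_mmap()/copy_page_range() give the child a copy-on-write
 * duplicate, as for fork().  From userspace, roughly:
 *
 *	clone(fn, stack, CLONE_VM | SIGCHLD, arg);   // shares the VM
 *	fork();                                      // duplicates it
 */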

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fdset;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}
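
/*
 * Worked example (illustrative, 64-bit longs): with fds 0, 1, 2 and 68
 * open, the backwards scan above stops at the last non-zero bitmap
 * word, index i = 1, so count_open_files() returns (1 + 1) * 64 = 128:
 * the open-fd count rounded up to a whole word of the bitmap, which is
 * all the copy loops in copy_files() below ever need.
 */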

static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	fdt = &newf->fdtab;
	fdt->next_fd = 0;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->max_fdset = __FD_SETSIZE;
	fdt->close_on_exec = &newf->close_on_exec_init;
	fdt->open_fds = &newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->free_files = NULL;
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, error = 0, expand;
	struct fdtable *old_fdt, *new_fdt;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	error = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	size = old_fdt->max_fdset;
	open_files = count_open_files(old_fdt);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fdset) {
		new_fdt->max_fdset = 0;
		expand = 1;
	}
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		error = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (error < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table:
		 * the old table may have been swapped for a new, bigger
		 * one in the meantime, so we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fdset > open_files) {
		int left = (new_fdt->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	tsk->files = newf;
	error = 0;
out:
	return error;

out_release:
	free_fdset(new_fdt->close_on_exec, new_fdt->max_fdset);
	free_fdset(new_fdt->open_fds, new_fdt->max_fdset);
	free_fd_array(new_fdt->fd, new_fdt->max_fds);
	kmem_cache_free(files_cachep, newf);
	goto out;
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct files_struct *files = current->files;
	int rc;

	BUG_ON(!files);

	/*
	 * This can race, but the race merely makes us copy when we
	 * didn't need to, and then drop the copy.
	 */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);
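
/*
 * Illustrative usage sketch: a caller such as the exec layer mentioned
 * above, about to modify the fd table privately, does:
 *
 *	int err = unshare_files();
 *	if (err)
 *		return err;
 *	// current->files is now guaranteed not to be shared
 *
 * The count == 1 fast path only takes an extra reference, since nobody
 * else can be sharing the table at that point.
 */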

static inline int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	tsk->sighand = sig;
	if (!sig)
		return -ENOMEM;
	spin_lock_init(&sig->siglock);
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

static inline int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	sig->it_real_value = sig->it_real_incr = 0;
	sig->real_timer.function = it_real_fn;
	sig->real_timer.data = (unsigned long) tsk;
	init_timer(&sig->real_timer);

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->tty = current->signal->tty;
	sig->pgrp = process_group(current);
	sig->session = current->signal->session;
	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = 0;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}

	return 0;
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}
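
/*
 * Userspace usage (illustrative sketch): threading libraries point the
 * kernel at the word to clear on thread exit, pairing with the futex
 * wake in mm_release() above:
 *
 *	pid_t tid;				// e.g. NPTL's TID field
 *	syscall(SYS_set_tid_address, &tid);	// returns current->pid
 */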

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static task_t *copy_process(unsigned long clone_flags,
				 unsigned long stack_start,
				 struct pt_regs *regs,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr,
				 int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->proc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_copy(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup;
	}
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/*
	 * Our parent's execution domain becomes the current domain.
	 * These must match for thread signalling to apply.
	 */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill (or normal SIGKILL).
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->flags & SIGNAL_GROUP_EXIT) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EAGAIN;
			goto bad_fork_cleanup_namespace;
		}
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}

		spin_unlock(&current->sighand->siglock);
	}

	/*
	 * inherit ioprio
	 */
	p->ioprio = current->ioprio;

	SET_LINKS(p);
	if (unlikely(p->ptrace & PT_PTRACED))
		__ptrace_link(p, current->parent);

	cpuset_fork(p);

	attach_pid(p, PIDTYPE_PID, p->pid);
	attach_pid(p, PIDTYPE_TGID, p->tgid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_PGID, process_group(p));
		attach_pid(p, PIDTYPE_SID, p->signal->session);
		if (p->pid)
			__get_cpu_var(process_counts)++;
	}

	proc_fork_connector(p);
	if (!current->signal->tty && p->signal->tty)
		p->signal->tty = NULL;

	nr_threads++;
	total_forks++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p);	/* blocking */
bad_fork_cleanup_files:
	exit_files(p);	/* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
#endif
bad_fork_cleanup:
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
	goto fork_out;
}

struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

task_t * __devinit fork_idle(int cpu)
{
	task_t *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	if (!task)
		return ERR_PTR(-ENOMEM);
	init_idle(task, cpu);
	unhash_process(task);
	return task;
}

static inline int fork_traceflag(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long pid = alloc_pidmap();

	if (pid < 0)
		return -EAGAIN;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag(clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid);
	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely(trace)) {
			current->ptrace_message = pid;
			ptrace_notify((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE))
				ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
		}
	} else {
		free_pidmap(pid);
		pid = PTR_ERR(p);
	}
	return pid;
}
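
/*
 * Illustrative note (not part of the original file): the classic
 * front-ends all funnel into do_fork() with different flag sets
 * (exact flags vary by architecture and C library):
 *
 *	fork():    do_fork(SIGCHLD, ...);
 *	vfork():   do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, ...);
 *	thread:    do_fork(CLONE_VM | CLONE_FS | CLONE_FILES |
 *	                   CLONE_SIGHAND | CLONE_THREAD | ..., ...);
 */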

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}