// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/user.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/userfaultfd_k.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/tty.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
#include <linux/livepatch.h>
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/io_uring.h>
#include <linux/bpf.h>
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#include <linux/rseq.h>
#include <uapi/linux/pidfd.h>
#include <linux/pidfs.h>

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <trace/events/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/task.h>

/*
 * Minimum number of threads to boot the kernel
 */
#define MIN_THREADS 20

/*
 * Maximum number of threads
 */
#define MAX_THREADS FUTEX_TID_MASK

/*
 * Counters protected by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads;			/* The idle threads do not count. */

static int max_threads;		/* tunable limit on nr_threads */

#define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)

static const char * const resident_page_types[] = {
	NAMED_ARRAY_INDEX(MM_FILEPAGES),
	NAMED_ARRAY_INDEX(MM_ANONPAGES),
	NAMED_ARRAY_INDEX(MM_SWAPENTS),
	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
};
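
/*
 * NAMED_ARRAY_INDEX(MM_FILEPAGES) expands to [MM_FILEPAGES] = "MM_FILEPAGES",
 * so the table above maps each rss counter index to its own name; check_mm()
 * uses it to report leaked counters by name rather than by raw index.
 */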

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);	/* outer */

#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
{
	return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_possible_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
	kmem_cache_free(task_struct_cachep, tsk);
}

/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)

# ifdef CONFIG_VMAP_STACK
/*
 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
 * flush.  Try to minimize the number of calls by caching stacks.
 */
#define NR_CACHED_STACKS 2
static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
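
/*
 * Each CPU keeps up to NR_CACHED_STACKS stacks of dead tasks parked here:
 * the free path tries to stash a stack with this_cpu_cmpxchg(), and the
 * allocation path below tries to reclaim one with this_cpu_xchg() before
 * falling back to __vmalloc_node_range().
 */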

struct vm_stack {
	struct rcu_head rcu;
	struct vm_struct *stack_vm_area;
};

static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
{
	unsigned int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
			continue;
		return true;
	}
	return false;
}

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);

	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
		return;

	vfree(vm_stack);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct vm_stack *vm_stack = tsk->stack;

	vm_stack->stack_vm_area = tsk->stack_vm_area;
	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
}

static int free_vm_stack_cache(unsigned int cpu)
{
	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *vm_stack = cached_vm_stacks[i];

		if (!vm_stack)
			continue;

		vfree(vm_stack->addr);
		cached_vm_stacks[i] = NULL;
	}

	return 0;
}

static int memcg_charge_kernel_stack(struct vm_struct *vm)
{
	int i;
	int ret;
	int nr_charged = 0;

	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);

	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
		if (ret)
			goto err;
		nr_charged++;
	}
	return 0;
err:
	for (i = 0; i < nr_charged; i++)
		memcg_kmem_uncharge_page(vm->pages[i], 0);
	return ret;
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct vm_struct *vm;
	void *stack;
	int i;

	for (i = 0; i < NR_CACHED_STACKS; i++) {
		struct vm_struct *s;

		s = this_cpu_xchg(cached_stacks[i], NULL);

		if (!s)
			continue;

		/* Reset stack metadata. */
		kasan_unpoison_range(s->addr, THREAD_SIZE);

		stack = kasan_reset_tag(s->addr);

		/* Clear stale pointers from reused stack. */
		memset(stack, 0, THREAD_SIZE);

		if (memcg_charge_kernel_stack(s)) {
			vfree(s->addr);
			return -ENOMEM;
		}

		tsk->stack_vm_area = s;
		tsk->stack = stack;
		return 0;
	}

	/*
	 * Allocated stacks are cached and later reused by new threads,
	 * so memcg accounting is performed manually on assigning/releasing
	 * stacks to tasks. Drop __GFP_ACCOUNT.
	 */
	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP & ~__GFP_ACCOUNT,
				     PAGE_KERNEL,
				     0, node, __builtin_return_address(0));
	if (!stack)
		return -ENOMEM;

	vm = find_vm_area(stack);
	if (memcg_charge_kernel_stack(vm)) {
		vfree(stack);
		return -ENOMEM;
	}
	/*
	 * We can't call find_vm_area() in interrupt context, and
	 * free_thread_stack() can be called in interrupt context,
	 * so cache the vm_struct.
	 */
	tsk->stack_vm_area = vm;
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return 0;
}

static void free_thread_stack(struct task_struct *tsk)
{
	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
		thread_stack_delayed_free(tsk);

	tsk->stack = NULL;
	tsk->stack_vm_area = NULL;
}

# else /* !CONFIG_VMAP_STACK */

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (likely(page)) {
		tsk->stack = kasan_reset_tag(page_address(page));
		return 0;
	}
	return -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

# endif /* CONFIG_VMAP_STACK */
# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */

static struct kmem_cache *thread_stack_cache;

static void thread_stack_free_rcu(struct rcu_head *rh)
{
	kmem_cache_free(thread_stack_cache, rh);
}

static void thread_stack_delayed_free(struct task_struct *tsk)
{
	struct rcu_head *rh = tsk->stack;

	call_rcu(rh, thread_stack_free_rcu);
}

static int alloc_thread_stack_node(struct task_struct *tsk, int node)
{
	unsigned long *stack;
	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
	stack = kasan_reset_tag(stack);
	tsk->stack = stack;
	return stack ? 0 : -ENOMEM;
}

static void free_thread_stack(struct task_struct *tsk)
{
	thread_stack_delayed_free(tsk);
	tsk->stack = NULL;
}

void thread_stack_cache_init(void)
{
	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
					THREAD_SIZE, THREAD_SIZE, 0, 0,
					THREAD_SIZE, NULL);
	BUG_ON(thread_stack_cache == NULL);
}

# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */

/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
static struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

#ifdef CONFIG_PER_VMA_LOCK

/* SLAB cache for vm_area_struct.lock */
static struct kmem_cache *vma_lock_cachep;

static bool vma_lock_alloc(struct vm_area_struct *vma)
{
	vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
	if (!vma->vm_lock)
		return false;

	init_rwsem(&vma->vm_lock->lock);
	vma->vm_lock_seq = -1;

	return true;
}

static inline void vma_lock_free(struct vm_area_struct *vma)
{
	kmem_cache_free(vma_lock_cachep, vma->vm_lock);
}

#else /* CONFIG_PER_VMA_LOCK */

static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
static inline void vma_lock_free(struct vm_area_struct *vma) {}

#endif /* CONFIG_PER_VMA_LOCK */

struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return NULL;

	vma_init(vma, mm);
	if (!vma_lock_alloc(vma)) {
		kmem_cache_free(vm_area_cachep, vma);
		return NULL;
	}

	return vma;
}

struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
{
	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);

	if (!new)
		return NULL;

	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
	/*
	 * orig->shared.rb may be modified concurrently, but the clone
	 * will be reinitialized.
	 */
	data_race(memcpy(new, orig, sizeof(*new)));
	if (!vma_lock_alloc(new)) {
		kmem_cache_free(vm_area_cachep, new);
		return NULL;
	}
	INIT_LIST_HEAD(&new->anon_vma_chain);
	vma_numab_state_init(new);
	dup_anon_vma_name(orig, new);

	return new;
}

void __vm_area_free(struct vm_area_struct *vma)
{
	vma_numab_state_free(vma);
	free_anon_vma_name(vma);
	vma_lock_free(vma);
	kmem_cache_free(vm_area_cachep, vma);
}

#ifdef CONFIG_PER_VMA_LOCK
static void vm_area_free_rcu_cb(struct rcu_head *head)
{
	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
						  vm_rcu);

	/* The vma should not be locked while being destroyed. */
	VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
	__vm_area_free(vma);
}
#endif

void vm_area_free(struct vm_area_struct *vma)
{
#ifdef CONFIG_PER_VMA_LOCK
	call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
#else
	__vm_area_free(vma);
#endif
}
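
/*
 * With CONFIG_PER_VMA_LOCK, page faults may find a VMA under RCU protection
 * (lockless VMA lookup in the fault path), so the object must not be
 * recycled until a grace period elapses; hence the call_rcu() detour above
 * instead of freeing synchronously.
 */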

static void account_kernel_stack(struct task_struct *tsk, int account)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm = task_stack_vm_area(tsk);
		int i;

		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
					      account * (PAGE_SIZE / 1024));
	} else {
		void *stack = task_stack_page(tsk);

		/* All stack pages are in the same node. */
		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
				      account * (THREAD_SIZE / 1024));
	}
}
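
/*
 * NR_KERNEL_STACK_KB is kept in KiB: e.g. with 16 KiB stacks and 4 KiB
 * pages, account=1 adds 4 to each of the four stack pages' counters in the
 * vmap case, or 16 in one shot in the contiguous case.
 */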

void exit_task_stack_account(struct task_struct *tsk)
{
	account_kernel_stack(tsk, -1);

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		struct vm_struct *vm;
		int i;

		vm = task_stack_vm_area(tsk);
		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
			memcg_kmem_uncharge_page(vm->pages[i], 0);
	}
}

static void release_task_stack(struct task_struct *tsk)
{
	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
		return;  /* Better to leak the stack than to free prematurely */

	free_thread_stack(tsk);
}

#ifdef CONFIG_THREAD_INFO_IN_TASK
void put_task_stack(struct task_struct *tsk)
{
	if (refcount_dec_and_test(&tsk->stack_refcount))
		release_task_stack(tsk);
}
#endif

void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
	WARN_ON_ONCE(tsk->seccomp.filter);
#endif
	release_user_cpus_ptr(tsk);
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,
	 * so free both.
	 */
	release_task_stack(tsk);
#else
	/*
	 * If the task had a separate stack allocation, it should be gone
	 * by now.
	 */
	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
#endif
	rt_mutex_debug_task_free(tsk);
	ftrace_graph_exit_task(tsk);
	arch_release_task_struct(tsk);
	if (tsk->flags & PF_KTHREAD)
		free_kthread_struct(tsk);
	bpf_task_storage_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct file *exe_file;

	exe_file = get_mm_exe_file(oldmm);
	RCU_INIT_POINTER(mm->exe_file, exe_file);
	/*
	 * We depend on the oldmm having properly denied write access to the
	 * exe_file already.
	 */
	if (exe_file && deny_write_access(exe_file))
		pr_warn_once("deny_write_access() failed in %s\n", __func__);
}

#ifdef CONFIG_MMU
static __latent_entropy int dup_mmap(struct mm_struct *mm,
					struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp;
	int retval;
	unsigned long charge = 0;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, 0);

	uprobe_start_dup_mmap();
	if (mmap_write_lock_killable(oldmm)) {
		retval = -EINTR;
		goto fail_uprobe_end;
	}
	flush_cache_dup_mm(oldmm);
	uprobe_dup_mmap(oldmm, mm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);

	/* No ordering required: file already has been exposed. */
	dup_mm_exe_file(mm, oldmm);

	mm->total_vm = oldmm->total_vm;
	mm->data_vm = oldmm->data_vm;
	mm->exec_vm = oldmm->exec_vm;
	mm->stack_vm = oldmm->stack_vm;

	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	khugepaged_fork(mm, oldmm);

	/* Use __mt_dup() to efficiently build an identical maple tree. */
	retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
	if (unlikely(retval))
		goto out;

	mt_clear_in_rcu(vmi.mas.tree);
	for_each_vma(vmi, mpnt) {
		struct file *file;

		vma_start_write(mpnt);
		if (mpnt->vm_flags & VM_DONTCOPY) {
			retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
						    mpnt->vm_end, GFP_KERNEL);
			if (retval)
				goto loop_out;

			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
			continue;
		}
		charge = 0;
		/*
		 * Don't duplicate many vmas if we've been oom-killed (for
		 * example)
		 */
		if (fatal_signal_pending(current)) {
			retval = -EINTR;
			goto loop_out;
		}
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned long len = vma_pages(mpnt);

			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
				goto fail_nomem;
			charge = len;
		}
		tmp = vm_area_dup(mpnt);
		if (!tmp)
			goto fail_nomem;
		retval = vma_dup_policy(mpnt, tmp);
		if (retval)
			goto fail_nomem_policy;
		tmp->vm_mm = mm;
		retval = dup_userfaultfd(tmp, &uf);
		if (retval)
			goto fail_nomem_anon_vma_fork;
		if (tmp->vm_flags & VM_WIPEONFORK) {
			/*
			 * VM_WIPEONFORK gets a clean slate in the child.
			 * Don't prepare anon_vma until fault since we don't
			 * copy page for current vma.
			 */
			tmp->anon_vma = NULL;
		} else if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		vm_flags_clear(tmp, VM_LOCKED_MASK);
		/*
		 * Copy/update hugetlb private vma information.
		 */
		if (is_vm_hugetlb_page(tmp))
			hugetlb_dup_vma_private(tmp);

		/*
		 * Link the vma into the MT. After using __mt_dup(), memory
		 * allocation is not necessary here, so it cannot fail.
		 */
		vma_iter_bulk_store(&vmi, tmp);

		mm->map_count++;

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		file = tmp->vm_file;
		if (file) {
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			i_mmap_lock_write(mapping);
			if (vma_is_shared_maywrite(tmp))
				mapping_allow_writable(mapping);
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_interval_tree_insert_after(tmp, mpnt,
						       &mapping->i_mmap);
			flush_dcache_mmap_unlock(mapping);
			i_mmap_unlock_write(mapping);
		}

		if (!(tmp->vm_flags & VM_WIPEONFORK))
			retval = copy_page_range(tmp, mpnt);

		if (retval) {
			mpnt = vma_next(&vmi);
			goto loop_out;
		}
	}
	/* a new mm has just been created */
	retval = arch_dup_mmap(oldmm, mm);
loop_out:
	vma_iter_free(&vmi);
	if (!retval) {
		mt_set_in_rcu(vmi.mas.tree);
	} else if (mpnt) {
		/*
		 * The entire maple tree has already been duplicated. If the
		 * mmap duplication fails, mark the failure point with
		 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
		 * stop releasing VMAs that have not been duplicated after this
		 * point.
		 */
		mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
		mas_store(&vmi.mas, XA_ZERO_ENTRY);
	}
out:
	mmap_write_unlock(mm);
	flush_tlb_mm(oldmm);
	mmap_write_unlock(oldmm);
	dup_userfaultfd_complete(&uf);
fail_uprobe_end:
	uprobe_end_dup_mmap();
	return retval;

fail_nomem_anon_vma_fork:
	mpol_put(vma_policy(tmp));
fail_nomem_policy:
	vm_area_free(tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto loop_out;
}

static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}
#else
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	mmap_write_lock(oldmm);
	dup_mm_exe_file(mm, oldmm);
	mmap_write_unlock(oldmm);
	return 0;
}
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

static void check_mm(struct mm_struct *mm)
{
	int i;

	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
			 "Please make sure 'struct resident_page_types[]' is updated as well");

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		long x = percpu_counter_sum(&mm->rss_stat[i]);

		if (unlikely(x))
			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
				 mm, resident_page_types[i], x);
	}

	if (mm_pgtables_bytes(mm))
		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
				mm_pgtables_bytes(mm));

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
#endif
}

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static void do_check_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	WARN_ON_ONCE(current->active_mm == mm);
}

static void do_shoot_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		WARN_ON_ONCE(current->mm);
		current->active_mm = &init_mm;
		switch_mm(mm, &init_mm, current);
	}
}

static void cleanup_lazy_tlbs(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
		/*
		 * In this case, lazy tlb mms are refcounted and would not
		 * reach __mmdrop until all CPUs have switched away and
		 * mmdrop()ed.
		 */
		return;
	}

	/*
	 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
	 * requires lazy mm users to switch to another mm when the refcount
	 * drops to zero, before the mm is freed. This requires IPIs here to
	 * switch kernel threads to init_mm.
	 *
	 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
	 * switch with the final userspace teardown TLB flush which leaves the
	 * mm lazy on this CPU but no others, reducing the need for additional
	 * IPIs here. There are cases where a final IPI is still required here,
	 * such as the final mmdrop being performed on a different CPU than the
	 * one exiting, or kernel threads using the mm when userspace exits.
	 *
	 * IPI overheads have not been found to be expensive, but they could
	 * be reduced in a number of possible ways, for example (roughly
	 * increasing order of complexity):
	 * - The last lazy reference created by exit_mm() could instead switch
	 *   to init_mm, however it's probable this will run on the same CPU
	 *   immediately afterwards, so this may not reduce IPIs much.
	 * - A batch of mms requiring IPIs could be gathered and freed at once.
	 * - CPUs store active_mm where it can be remotely checked without a
	 *   lock, to filter out false-positives in the cpumask.
	 * - After mm_users or mm_count reaches zero, switching away from the
	 *   mm could clear mm_cpumask to reduce some IPIs, perhaps together
	 *   with some batching or delaying of the final IPIs.
	 * - A delayed freeing and RCU-like quiescing sequence based on mm
	 *   switching to avoid IPIs completely.
	 */
	on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
	if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
		on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	WARN_ON_ONCE(mm == current->mm);

	/* Ensure no CPUs are using this as their lazy tlb mm */
	cleanup_lazy_tlbs(mm);

	WARN_ON_ONCE(mm == current->active_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	mmu_notifier_subscriptions_destroy(mm);
	check_mm(mm);
	put_user_ns(mm->user_ns);
	mm_pasid_drop(mm);
	mm_destroy_cid(mm);
	percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);

	free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

static void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm;

	mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

static inline void free_signal_struct(struct signal_struct *sig)
{
	taskstats_tgid_free(sig);
	sched_autogroup_exit(sig);
	/*
	 * __mmdrop is not safe to call from softirq context on x86 due to
	 * pgd_dtor so postpone it to the async context
	 */
	if (sig->oom_mm)
		mmdrop_async(sig->oom_mm);
	kmem_cache_free(signal_cachep, sig);
}

static inline void put_signal_struct(struct signal_struct *sig)
{
	if (refcount_dec_and_test(&sig->sigcnt))
		free_signal_struct(sig);
}

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!tsk->exit_state);
	WARN_ON(refcount_read(&tsk->usage));
	WARN_ON(tsk == current);

	io_uring_free(tsk);
	cgroup_free(tsk);
	task_numa_free(tsk, true);
	security_task_free(tsk);
	exit_creds(tsk);
	delayacct_tsk_free(tsk);
	put_signal_struct(tsk->signal);
	sched_core_free(tsk);
	free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);

void __put_task_struct_rcu_cb(struct rcu_head *rhp)
{
	struct task_struct *task = container_of(rhp, struct task_struct, rcu);

	__put_task_struct(task);
}
EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb);

void __init __weak arch_task_cache_init(void) { }

/*
 * set_max_threads
 */
static void set_max_threads(unsigned int max_threads_suggested)
{
	u64 threads;
	unsigned long nr_pages = totalram_pages();

	/*
	 * The number of threads shall be limited such that the thread
	 * structures may only consume a small part of the available memory.
	 */
	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
		threads = MAX_THREADS;
	else
		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
				    (u64) THREAD_SIZE * 8UL);

	if (threads > max_threads_suggested)
		threads = max_threads_suggested;

	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}
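
/*
 * Worked example of the limit above (illustrative numbers): with 8 GiB of
 * RAM and 16 KiB stacks, threads = 2^33 / (2^14 * 8) = 65536, i.e. thread
 * stacks alone may consume at most one eighth of memory.
 */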

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* Fetch thread_struct whitelist for the architecture. */
	arch_thread_struct_whitelist(offset, size);

	/*
	 * Handle zero-sized whitelist or empty thread_struct, otherwise
	 * adjust offset to position of thread_struct in task_struct.
	 */
	if (unlikely(*size == 0))
		*offset = 0;
	else
		*offset += offsetof(struct task_struct, thread);
}

void __init fork_init(void)
{
	int i;
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	0
#endif
	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
	unsigned long useroffset, usersize;

	/* create a slab on which task_structs can be allocated */
	task_struct_whitelist(&useroffset, &usersize);
	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
			arch_task_struct_size, align,
			SLAB_PANIC|SLAB_ACCOUNT,
			useroffset, usersize, NULL);

	/* do the arch specific task caches init */
	arch_task_cache_init();

	set_max_threads(MAX_THREADS);

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];

	for (i = 0; i < UCOUNT_COUNTS; i++)
		init_user_ns.ucount_max[i] = max_threads/2;

	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);

#ifdef CONFIG_VMAP_STACK
	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
			  NULL, free_vm_stack_cache);
#endif

	scs_init();

	lockdep_init_task(&init_task);
	uprobes_init();
}

int __weak arch_dup_task_struct(struct task_struct *dst,
					       struct task_struct *src)
{
	*dst = *src;
	return 0;
}

void set_task_stack_end_magic(struct task_struct *tsk)
{
	unsigned long *stackend;

	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */
}

static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
	struct task_struct *tsk;
	int err;

	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node(orig);
	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto free_tsk;

	err = alloc_thread_stack_node(tsk, node);
	if (err)
		goto free_tsk;

#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_set(&tsk->stack_refcount, 1);
#endif
	account_kernel_stack(tsk, 1);

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under
	 * the sighand lock in case orig has changed between now and
	 * then. Until then, filter must be NULL to avoid messing up
	 * the usage counts on the error path calling free_task.
	 */
	tsk->seccomp.filter = NULL;
#endif

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	set_task_stack_end_magic(tsk);
	clear_syscall_work_syscall_user_dispatch(tsk);

#ifdef CONFIG_STACKPROTECTOR
	tsk->stack_canary = get_random_canary();
#endif
	if (orig->cpus_ptr == &orig->cpus_mask)
		tsk->cpus_ptr = &tsk->cpus_mask;
	dup_user_cpus_ptr(tsk, orig, node);

	/*
	 * One for the user space visible state that goes away when reaped.
	 * One for the scheduler.
	 */
	refcount_set(&tsk->rcu_users, 2);
	/* One for the rcu users */
	refcount_set(&tsk->usage, 1);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	tsk->task_frag.page = NULL;
	tsk->wake_q.next = NULL;
	tsk->worker_private = NULL;

	kcov_task_init(tsk);
	kmsan_task_create(tsk);
	kmap_local_fork(tsk);

#ifdef CONFIG_FAULT_INJECTION
	tsk->fail_nth = 0;
#endif

#ifdef CONFIG_BLK_CGROUP
	tsk->throttle_disk = NULL;
	tsk->use_memdelay = 0;
#endif

#ifdef CONFIG_ARCH_HAS_CPU_PASID
	tsk->pasid_activated = 0;
#endif

#ifdef CONFIG_MEMCG
	tsk->active_memcg = NULL;
#endif

#ifdef CONFIG_CPU_SUP_INTEL
	tsk->reported_split_lock = 0;
#endif

#ifdef CONFIG_SCHED_MM_CID
	tsk->mm_cid = -1;
	tsk->last_mm_cid = -1;
	tsk->mm_cid_active = 0;
	tsk->migrate_from_cpu = -1;
#endif
	return tsk;

free_stack:
	exit_task_stack_account(tsk);
	free_thread_stack(tsk);
free_tsk:
	free_task_struct(tsk);
	return NULL;
}

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

static int __init coredump_filter_setup(char *s)
{
	default_dump_filter =
		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
		MMF_DUMP_FILTER_MASK;
	return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
	spin_lock_init(&mm->ioctx_lock);
	mm->ioctx_table = NULL;
#endif
}

static __always_inline void mm_clear_owner(struct mm_struct *mm,
					   struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	if (mm->owner == p)
		WRITE_ONCE(mm->owner, NULL);
#endif
}

static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
	mm->owner = p;
#endif
}

static void mm_init_uprobes_state(struct mm_struct *mm)
{
#ifdef CONFIG_UPROBES
	mm->uprobes_state.xol_area = NULL;
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
	struct user_namespace *user_ns)
{
	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	seqcount_init(&mm->write_protect_seq);
	mmap_init_lock(mm);
	INIT_LIST_HEAD(&mm->mmlist);
#ifdef CONFIG_PER_VMA_LOCK
	mm->mm_lock_seq = 0;
#endif
	mm_pgtables_bytes_init(mm);
	mm->map_count = 0;
	mm->locked_vm = 0;
	atomic64_set(&mm->pinned_vm, 0);
	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
	spin_lock_init(&mm->page_table_lock);
	spin_lock_init(&mm->arg_lock);
	mm_init_cpumask(mm);
	mm_init_aio(mm);
	mm_init_owner(mm, p);
	mm_pasid_init(mm);
	RCU_INIT_POINTER(mm->exe_file, NULL);
	mmu_notifier_subscriptions_init(mm);
	init_tlb_flush_pending(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	mm->pmd_huge_pte = NULL;
#endif
	mm_init_uprobes_state(mm);
	hugetlb_count_init(mm);

	if (current->mm) {
		mm->flags = mmf_init_flags(current->mm->flags);
		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
	} else {
		mm->flags = default_dump_filter;
		mm->def_flags = 0;
	}

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	if (mm_alloc_cid(mm))
		goto fail_cid;

	if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
				     NR_MM_COUNTERS))
		goto fail_pcpu;

	mm->user_ns = get_user_ns(user_ns);
	lru_gen_init_mm(mm);
	return mm;

fail_pcpu:
	mm_destroy_cid(mm);
fail_cid:
	destroy_context(mm);
fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
	struct mm_struct *mm;

	mm = allocate_mm();
	if (!mm)
		return NULL;

	memset(mm, 0, sizeof(*mm));
	return mm_init(mm, current, current_user_ns());
}

static inline void __mmput(struct mm_struct *mm)
{
	VM_BUG_ON(atomic_read(&mm->mm_users));

	uprobe_clear_state(mm);
	exit_aio(mm);
	ksm_exit(mm);
	khugepaged_exit(mm); /* must run before exit_mmap */
	exit_mmap(mm);
	mm_put_huge_zero_folio(mm);
	set_mm_exe_file(mm, NULL);
	if (!list_empty(&mm->mmlist)) {
		spin_lock(&mmlist_lock);
		list_del(&mm->mmlist);
		spin_unlock(&mmlist_lock);
	}
	if (mm->binfmt)
		module_put(mm->binfmt->module);
	lru_gen_del_mm(mm);
	mmdrop(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users))
		__mmput(mm);
}
EXPORT_SYMBOL_GPL(mmput);

#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct,
					    async_put_work);

	__mmput(mm);
}

void mmput_async(struct mm_struct *mm)
{
	if (atomic_dec_and_test(&mm->mm_users)) {
		INIT_WORK(&mm->async_put_work, mmput_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
EXPORT_SYMBOL_GPL(mmput_async);
#endif

/**
 * set_mm_exe_file - change a reference to the mm's executable file
 * @mm: The mm to change.
 * @new_exe_file: The new file to use.
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main users are mmput() and sys_execve(). Callers prevent concurrent
 * invocations: in mmput() nobody alive left, in execve it happens before
 * the new mm is made visible to anyone.
 *
 * Can only fail if new_exe_file != NULL.
 */
int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct file *old_exe_file;

	/*
	 * It is safe to dereference the exe_file without RCU as
	 * this function is only called if nobody else can access
	 * this mm -- see comment above for justification.
	 */
	old_exe_file = rcu_dereference_raw(mm->exe_file);

	if (new_exe_file) {
		/*
		 * We expect the caller (i.e., sys_execve) to have already
		 * denied write access, so this is unlikely to fail.
		 */
		if (unlikely(deny_write_access(new_exe_file)))
			return -EACCES;
		get_file(new_exe_file);
	}
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	if (old_exe_file) {
		allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}

/**
 * replace_mm_exe_file - replace a reference to the mm's executable file
 * @mm: The mm to change.
 * @new_exe_file: The new file to use.
 *
 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
 *
 * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
 */
int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	struct vm_area_struct *vma;
	struct file *old_exe_file;
	int ret = 0;

	/* Forbid mm->exe_file change if old file still mapped. */
	old_exe_file = get_mm_exe_file(mm);
	if (old_exe_file) {
		VMA_ITERATOR(vmi, mm, 0);
		mmap_read_lock(mm);
		for_each_vma(vmi, vma) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &old_exe_file->f_path)) {
				ret = -EBUSY;
				break;
			}
		}
		mmap_read_unlock(mm);
		fput(old_exe_file);
		if (ret)
			return ret;
	}

	ret = deny_write_access(new_exe_file);
	if (ret)
		return -EACCES;
	get_file(new_exe_file);

	/* set the new file */
	mmap_write_lock(mm);
	old_exe_file = rcu_dereference_raw(mm->exe_file);
	rcu_assign_pointer(mm->exe_file, new_exe_file);
	mmap_write_unlock(mm);

	if (old_exe_file) {
		allow_write_access(old_exe_file);
		fput(old_exe_file);
	}
	return 0;
}

/**
 * get_mm_exe_file - acquire a reference to the mm's executable file
 * @mm: The mm of interest.
 *
 * Returns %NULL if mm has no associated executable file.
 * User must release file via fput().
 */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	rcu_read_lock();
	exe_file = get_file_rcu(&mm->exe_file);
	rcu_read_unlock();
	return exe_file;
}

/**
 * get_task_exe_file - acquire a reference to the task's executable file
 * @task: The task.
 *
 * Returns %NULL if task's mm (if any) has no associated executable file or
 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
 * User must release file via fput().
 */
struct file *get_task_exe_file(struct task_struct *task)
{
	struct file *exe_file = NULL;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (!(task->flags & PF_KTHREAD))
			exe_file = get_mm_exe_file(mm);
	}
	task_unlock(task);
	return exe_file;
}

/**
 * get_task_mm - acquire a reference to the task's mm
 * @task: The task.
 *
 * Returns %NULL if the task has no mm.  Checks PF_KTHREAD (meaning
 * this kernel worker thread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count.  User must release the mm via mmput()
 * after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			mm = NULL;
		else
			mmget(mm);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);
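
/*
 * Sketch of the usual caller pattern (illustrative, not a caller in this
 * file): the mm_users reference taken by get_task_mm() must be paired with
 * mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		mmap_read_lock(mm);
 *		... walk mm's VMAs ...
 *		mmap_read_unlock(mm);
 *		mmput(mm);
 *	}
 */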

struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
	struct mm_struct *mm;
	int err;

	err = down_read_killable(&task->signal->exec_update_lock);
	if (err)
		return ERR_PTR(err);

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, mode)) {
		mmput(mm);
		mm = ERR_PTR(-EACCES);
	}
	up_read(&task->signal->exec_update_lock);

	return mm;
}
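
/*
 * Typical consumers of mm_access() are the /proc/[pid] files (mem, environ,
 * ...), which pass a PTRACE_MODE_* request, e.g. an FSCREDS-based attach
 * mode for /proc/[pid]/mem; holding exec_update_lock keeps the permission
 * check from racing with a concurrent execve().
 */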

static void complete_vfork_done(struct task_struct *tsk)
{
	struct completion *vfork;

	task_lock(tsk);
	vfork = tsk->vfork_done;
	if (likely(vfork)) {
		tsk->vfork_done = NULL;
		complete(vfork);
	}
	task_unlock(tsk);
}

static int wait_for_vfork_done(struct task_struct *child,
				struct completion *vfork)
{
	unsigned int state = TASK_KILLABLE|TASK_FREEZABLE;
	int killed;

	cgroup_enter_frozen();
	killed = wait_for_completion_state(vfork, state);
	cgroup_leave_frozen(false);

	if (killed) {
		task_lock(child);
		child->vfork_done = NULL;
		task_unlock(child);
	}

	put_task_struct(child);
	return killed;
}
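
/*
 * CLONE_VFORK flow in brief: the parent blocks in wait_for_vfork_done()
 * until the child releases its borrowed mm -- mm_release() below calls
 * complete_vfork_done() on exec or exit -- so the parent never runs while
 * the child still owns the shared address space.
 */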
1604
1da177e4
LT
1605/* Please note the differences between mmput and mm_release.
1606 * mmput is called whenever we stop holding onto a mm_struct,
1607 * error success whatever.
1608 *
1609 * mm_release is called after a mm_struct has been removed
1610 * from the current process.
1611 *
1612 * This difference is important for error handling, when we
1613 * only half set up a mm_struct for a new process and need to restore
1614 * the old one. Because we mmput the new mm_struct before
1615 * restoring the old one. . .
1616 * Eric Biederman 10 January 1998
1617 */
4610ba7a 1618static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1da177e4 1619{
0326f5a9
SD
1620 uprobe_free_utask(tsk);
1621
1da177e4
LT
1622 /* Get rid of any cached register state */
1623 deactivate_mm(tsk, mm);
1624
fec1d011 1625 /*
735f2770
MH
1626 * Signal userspace if we're not exiting with a core dump
1627 * because we want to leave the value intact for debugging
1628 * purposes.
fec1d011 1629 */
9c8a8228 1630 if (tsk->clear_child_tid) {
92307383 1631 if (atomic_read(&mm->mm_users) > 1) {
9c8a8228
ED
1632 /*
1633 * We don't check the error code - if userspace has
1634 * not set up a proper pointer then tough luck.
1635 */
1636 put_user(0, tsk->clear_child_tid);
2de0db99
DB
1637 do_futex(tsk->clear_child_tid, FUTEX_WAKE,
1638 1, NULL, NULL, 0, 0);
9c8a8228 1639 }
1da177e4 1640 tsk->clear_child_tid = NULL;
1da177e4 1641 }
f7505d64
KK
1642
1643 /*
1644 * All done, finally we can wake up parent and return this mm to him.
1645 * Also kthread_stop() uses this completion for synchronization.
1646 */
1647 if (tsk->vfork_done)
1648 complete_vfork_done(tsk);
1da177e4
LT
1649}
1650
4610ba7a
TG
1651void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1652{
150d7158 1653 futex_exit_release(tsk);
4610ba7a
TG
1654 mm_release(tsk, mm);
1655}
1656
1657void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1658{
150d7158 1659 futex_exec_release(tsk);
4610ba7a
TG
1660 mm_release(tsk, mm);
1661}
1662
13585fa0
NA
1663/**
1664 * dup_mm() - duplicates an existing mm structure
1665 * @tsk: the task_struct with which the new mm will be associated.
1666 * @oldmm: the mm to duplicate.
1667 *
1668 * Allocates a new mm structure and duplicates the provided @oldmm structure
1669 * content into it.
1670 *
1671 * Return: the duplicated mm or NULL on failure.
a0a7ec30 1672 */
13585fa0
NA
1673static struct mm_struct *dup_mm(struct task_struct *tsk,
1674 struct mm_struct *oldmm)
a0a7ec30 1675{
13585fa0 1676 struct mm_struct *mm;
a0a7ec30
JD
1677 int err;
1678
a0a7ec30
JD
1679 mm = allocate_mm();
1680 if (!mm)
1681 goto fail_nomem;
1682
1683 memcpy(mm, oldmm, sizeof(*mm));
1684
bfedb589 1685 if (!mm_init(mm, tsk, mm->user_ns))
a0a7ec30
JD
1686 goto fail_nomem;
1687
a0a7ec30
JD
1688 err = dup_mmap(mm, oldmm);
1689 if (err)
1690 goto free_pt;
1691
1692 mm->hiwater_rss = get_mm_rss(mm);
1693 mm->hiwater_vm = mm->total_vm;
1694
801460d0
HS
1695 if (mm->binfmt && !try_module_get(mm->binfmt->module))
1696 goto free_pt;
1697
a0a7ec30
JD
1698 return mm;
1699
1700free_pt:
801460d0
HS
1701 /* don't put binfmt in mmput, we haven't got module yet */
1702 mm->binfmt = NULL;
c3f3ce04 1703 mm_init_owner(mm, NULL);
a0a7ec30
JD
1704 mmput(mm);
1705
1706fail_nomem:
1707 return NULL;
a0a7ec30
JD
1708}
1709
fb0a685c 1710static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
1da177e4 1711{
fb0a685c 1712 struct mm_struct *mm, *oldmm;
1da177e4
LT
1713
1714 tsk->min_flt = tsk->maj_flt = 0;
1715 tsk->nvcsw = tsk->nivcsw = 0;
17406b82
MSB
1716#ifdef CONFIG_DETECT_HUNG_TASK
1717 tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
a2e51445 1718 tsk->last_switch_time = 0;
17406b82 1719#endif
1da177e4
LT
1720
1721 tsk->mm = NULL;
1722 tsk->active_mm = NULL;
1723
1724 /*
1725 * Are we cloning a kernel thread?
1726 *
1727 * We need to steal a active VM for that..
1728 */
1729 oldmm = current->mm;
1730 if (!oldmm)
1731 return 0;
1732
1733 if (clone_flags & CLONE_VM) {
3fce371b 1734 mmget(oldmm);
1da177e4 1735 mm = oldmm;
a6895399
REB
1736 } else {
1737 mm = dup_mm(tsk, current->mm);
1738 if (!mm)
1739 return -ENOMEM;
1da177e4
LT
1740 }
1741
1da177e4
LT
1742 tsk->mm = mm;
1743 tsk->active_mm = mm;
af7f588d 1744 sched_mm_cid_fork(tsk);
1da177e4 1745 return 0;
1da177e4
LT
1746}
1747
a39bc516 1748static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1da177e4 1749{
498052bb 1750 struct fs_struct *fs = current->fs;
1da177e4 1751 if (clone_flags & CLONE_FS) {
498052bb 1752 /* tsk->fs is already what we want */
2a4419b5 1753 spin_lock(&fs->lock);
90383cc0 1754 /* "users" and "in_exec" locked for check_unsafe_exec() */
498052bb 1755 if (fs->in_exec) {
2a4419b5 1756 spin_unlock(&fs->lock);
498052bb
AV
1757 return -EAGAIN;
1758 }
1759 fs->users++;
2a4419b5 1760 spin_unlock(&fs->lock);
1da177e4
LT
1761 return 0;
1762 }
498052bb 1763 tsk->fs = copy_fs_struct(fs);
1da177e4
LT
1764 if (!tsk->fs)
1765 return -ENOMEM;
1766 return 0;
1767}
1768
11f3f500
MC
1769static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
1770 int no_files)
a016f338
JD
1771{
1772 struct files_struct *oldf, *newf;
1773 int error = 0;
1774
1775 /*
1776 * A background process may not have any files ...
1777 */
1778 oldf = current->files;
1779 if (!oldf)
1780 goto out;
1781
11f3f500
MC
1782 if (no_files) {
1783 tsk->files = NULL;
1784 goto out;
1785 }
1786
a016f338
JD
1787 if (clone_flags & CLONE_FILES) {
1788 atomic_inc(&oldf->count);
1789 goto out;
1790 }
1791
60997c3d 1792 newf = dup_fd(oldf, NR_OPEN_MAX, &error);
a016f338
JD
1793 if (!newf)
1794 goto out;
1795
1796 tsk->files = newf;
1797 error = 0;
1798out:
1799 return error;
1800}
1801
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		refcount_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	RCU_INIT_POINTER(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;

	refcount_set(&sig->count, 1);
	spin_lock_irq(&current->sighand->siglock);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	spin_unlock_irq(&current->sighand->siglock);

	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
	if (clone_flags & CLONE_CLEAR_SIGHAND)
		flush_signal_handlers(tsk, 0);

	return 0;
}
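
/*
 * Illustrative userspace sketch, not part of this file (assumes the kernel
 * headers expose struct clone_args and __NR_clone3): CLONE_CLEAR_SIGHAND,
 * handled above, gives the child a freshly allocated handler table with all
 * non-ignored handlers reset to SIG_DFL, instead of sharing or inheriting
 * the parent's handlers verbatim. It cannot be combined with CLONE_SIGHAND:
 *
 *	struct clone_args args = {
 *		.flags = CLONE_CLEAR_SIGHAND,
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 */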

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (refcount_dec_and_test(&sighand->count)) {
		signalfd_cleanup(sighand);
		/*
		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
		 * without an RCU grace period, see __lock_task_sighand().
		 */
		kmem_cache_free(sighand_cachep, sighand);
	}
}

/*
 * Initialize POSIX timer handling for a thread group.
 */
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;
	unsigned long cpu_limit;

	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	posix_cputimers_group_init(pct, cpu_limit);
}

static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	sig->quick_threads = 1;
	atomic_set(&sig->live, 1);
	refcount_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_HLIST_HEAD(&sig->multiprocess);
	seqlock_init(&sig->stats_lock);
	prev_cputime_init(&sig->prev_cputime);

#ifdef CONFIG_POSIX_TIMERS
	INIT_LIST_HEAD(&sig->posix_timers);
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;
#endif

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	mutex_init(&sig->cred_guard_mutex);
	init_rwsem(&sig->exec_update_lock);

	return 0;
}

static void copy_seccomp(struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
	/*
	 * Must be called with sighand->lock held, which is common to
	 * all threads in the group. Holding cred_guard_mutex is not
	 * needed because this new task is not yet running and cannot
	 * be racing exec.
	 */
	assert_spin_locked(&current->sighand->siglock);

	/* Ref-count the new filter user, and assign it. */
	get_seccomp_filter(current);
	p->seccomp = current->seccomp;

	/*
	 * Explicitly enable no_new_privs here in case it got set
	 * between the task_struct being duplicated and holding the
	 * sighand lock. The seccomp state and nnp must be in sync.
	 */
	if (task_no_new_privs(current))
		task_set_no_new_privs(p);

	/*
	 * If the parent gained a seccomp mode after copying thread
	 * flags and before we held the sighand lock, we have to
	 * manually enable the seccomp thread flag here.
	 */
	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
		set_task_syscall_work(p, SECCOMP);
#endif
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}
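
/*
 * Illustrative userspace sketch, not part of this file: clear_child_tid is
 * how threading libraries implement join. When a task with a non-NULL
 * clear_child_tid exits, the kernel writes 0 to that address and does a
 * futex wake on it, so a joiner can simply wait for the store:
 *
 *	static pid_t child_tid;
 *
 *	// in the new thread (or via clone()'s CLONE_CHILD_CLEARTID):
 *	syscall(SYS_set_tid_address, &child_tid);
 *
 *	// in the joiner: sleep until the kernel zeroes child_tid on exit
 *	while (child_tid != 0)
 *		syscall(SYS_futex, &child_tid, FUTEX_WAIT, child_tid, NULL);
 */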

static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	p->pi_waiters = RB_ROOT_CACHED;
	p->pi_top_task = NULL;
	p->pi_blocked_on = NULL;
#endif
}

static inline void init_task_pid_links(struct task_struct *task)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_NODE(&task->pid_links[type]);
}

static inline void
init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
{
	if (type == PIDTYPE_PID)
		task->thread_pid = pid;
	else
		task->signal->pids[type] = pid;
}

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
	INIT_LIST_HEAD(&p->rcu_tasks_exit_list);
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	p->trc_reader_nesting = 0;
	p->trc_reader_special.s = 0;
	INIT_LIST_HEAD(&p->trc_holdout_list);
	INIT_LIST_HEAD(&p->trc_blkd_node);
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/**
 * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
 * @pid: the struct pid for which to create a pidfd
 * @flags: flags of the new @pidfd
 * @ret: Where to return the file for the pidfd.
 *
 * Allocate a new file that stashes @pid and reserve a new pidfd number in the
 * caller's file descriptor table. The pidfd is reserved but not installed yet.
 *
 * The helper doesn't perform checks on @pid which makes it useful for pidfds
 * created via CLONE_PIDFD where @pid has no task attached when the pidfd and
 * pidfd file are prepared.
 *
 * If this function returns successfully the caller is responsible for either
 * calling fd_install() with the returned pidfd and pidfd file in order to
 * install the pidfd into its file descriptor table, or for calling
 * put_unused_fd() and fput() on the returned pidfd and pidfd file
 * respectively.
 *
 * This function is useful when a pidfd must already be reserved but there
 * might still be points of failure afterwards and the caller wants to ensure
 * that no pidfd is leaked into its file descriptor table.
 *
 * Return: On success, a reserved pidfd is returned from the function and a new
 *         pidfd file is returned in the last argument to the function. On
 *         error, a negative error code is returned from the function and the
 *         last argument remains unchanged.
 */
static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{
	int pidfd;
	struct file *pidfd_file;

	pidfd = get_unused_fd_flags(O_CLOEXEC);
	if (pidfd < 0)
		return pidfd;

	pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR);
	if (IS_ERR(pidfd_file)) {
		put_unused_fd(pidfd);
		return PTR_ERR(pidfd_file);
	}
	/*
	 * anon_inode_getfile() ignores everything outside of the
	 * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually.
	 */
	pidfd_file->f_flags |= (flags & PIDFD_THREAD);
	*ret = pidfd_file;
	return pidfd;
}

/**
 * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
 * @pid: the struct pid for which to create a pidfd
 * @flags: flags of the new @pidfd
 * @ret: Where to return the pidfd.
 *
 * Allocate a new file that stashes @pid and reserve a new pidfd number in the
 * caller's file descriptor table. The pidfd is reserved but not installed yet.
 *
 * The helper verifies that @pid is still in use, and that without
 * PIDFD_THREAD the task identified by @pid is a thread-group leader.
 *
 * If this function returns successfully the caller is responsible for either
 * calling fd_install() with the returned pidfd and pidfd file in order to
 * install the pidfd into its file descriptor table, or for calling
 * put_unused_fd() and fput() on the returned pidfd and pidfd file
 * respectively.
 *
 * This function is useful when a pidfd must already be reserved but there
 * might still be points of failure afterwards and the caller wants to ensure
 * that no pidfd is leaked into its file descriptor table.
 *
 * Return: On success, a reserved pidfd is returned from the function and a new
 *         pidfd file is returned in the last argument to the function. On
 *         error, a negative error code is returned from the function and the
 *         last argument remains unchanged.
 */
int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
{
	bool thread = flags & PIDFD_THREAD;

	if (!pid || !pid_has_task(pid, thread ? PIDTYPE_PID : PIDTYPE_TGID))
		return -EINVAL;

	return __pidfd_prepare(pid, flags, ret);
}
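
/*
 * A minimal caller sketch, not part of this file, restating the contract
 * from the kernel-doc above; some_later_step() is hypothetical:
 *
 *	struct file *pidfd_file = NULL;
 *	int pidfd, err;
 *
 *	pidfd = pidfd_prepare(pid, 0, &pidfd_file);
 *	if (pidfd < 0)
 *		return pidfd;
 *
 *	err = some_later_step();
 *	if (err) {
 *		// nothing was installed, so nothing leaks into the fd table
 *		put_unused_fd(pidfd);
 *		fput(pidfd_file);
 *		return err;
 *	}
 *	// commit: make the reserved fd visible to userspace
 *	fd_install(pidfd, pidfd_file);
 */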

static void __delayed_free_task(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	free_task(tsk);
}

static __always_inline void delayed_free_task(struct task_struct *tsk)
{
	if (IS_ENABLED(CONFIG_MEMCG))
		call_rcu(&tsk->rcu, __delayed_free_task);
	else
		free_task(tsk);
}

static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
{
	/* Skip if kernel thread */
	if (!tsk->mm)
		return;

	/* Skip if spawning a thread or using vfork */
	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
		return;

	/* We need to synchronize with __set_oom_adj */
	mutex_lock(&oom_adj_mutex);
	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
	/* Update the values in case they were changed after copy_signal */
	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
	mutex_unlock(&oom_adj_mutex);
}

#ifdef CONFIG_RV
static void rv_task_fork(struct task_struct *p)
{
	int i;

	for (i = 0; i < RV_PER_TASK_MONITORS; i++)
		p->rv[i].da_mon.monitoring = false;
}
#else
#define rv_task_fork(p) do {} while (0)
#endif

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
__latent_entropy struct task_struct *copy_process(
					struct pid *pid,
					int trace,
					int node,
					struct kernel_clone_args *args)
{
	int pidfd = -1, retval;
	struct task_struct *p;
	struct multiprocess_signals delayed;
	struct file *pidfile = NULL;
	const u64 clone_flags = args->flags;
	struct nsproxy *nsp = current->nsproxy;

	/*
	 * Don't allow sharing the root directory with processes in a different
	 * namespace.
	 */
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
	    current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group with the forking task.
	 */
	if (clone_flags & CLONE_THREAD) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	if (clone_flags & CLONE_PIDFD) {
		/*
		 * - CLONE_DETACHED is blocked so that we can potentially
		 *   reuse it later for CLONE_PIDFD.
		 */
		if (clone_flags & CLONE_DETACHED)
			return ERR_PTR(-EINVAL);
	}

	/*
	 * Force any signals received before this point to be delivered
	 * before the fork happens.  Collect up signals sent to multiple
	 * processes that happen during the fork and delay them so that
	 * they appear to happen after the fork.
	 */
	sigemptyset(&delayed.signal);
	INIT_HLIST_NODE(&delayed.node);

	spin_lock_irq(&current->sighand->siglock);
	if (!(clone_flags & CLONE_THREAD))
		hlist_add_head(&delayed.node, &current->signal->multiprocess);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	retval = -ERESTARTNOINTR;
	if (task_sigpending(current))
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;
	p->flags &= ~PF_KTHREAD;
	if (args->kthread)
		p->flags |= PF_KTHREAD;
	if (args->user_worker) {
		/*
		 * Mark us a user worker, and block any signal that isn't
		 * fatal or STOP
		 */
		p->flags |= PF_USER_WORKER;
		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
	}
	if (args->io_thread)
		p->flags |= PF_IO_WORKER;

	if (args->name)
		strscpy_pad(p->comm, args->name, sizeof(p->comm));

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

	lockdep_assert_irqs_enabled();
#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	retval = -EAGAIN;
	if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_cleanup_count;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (data_race(nr_threads >= max_threads))
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	p->utimescaled = p->stimescaled = 0;
#endif
	prev_cputime_init(&p->prev_cputime);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_init(&p->vtime.seqcount);
	p->vtime.starttime = 0;
	p->vtime.state = VTIME_INACTIVE;
#endif

#ifdef CONFIG_IO_URING
	p->io_uring = NULL;
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

#ifdef CONFIG_PSI
	p->psi_flags = 0;
#endif

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cputimers_init(&p->posix_cputimers);

	p->io_context = NULL;
	audit_set_context(p, NULL);
	cgroup_fork(p);
	if (args->kthread) {
		if (!set_kthread_struct(p))
			goto bad_fork_cleanup_delayacct;
	}
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_delayacct;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
	p->irqtrace.hardirq_disable_ip = _THIS_IP_;
	p->irqtrace.softirq_enable_ip = _THIS_IP_;
	p->softirqs_enabled = 1;
	p->softirq_context = 0;
#endif

	p->pagefault_disabled = 0;

#ifdef CONFIG_LOCKDEP
	lockdep_init_task(p);
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io = 0;
	p->sequential_io_avg = 0;
#endif
#ifdef CONFIG_BPF_SYSCALL
	RCU_INIT_POINTER(p->bpf_storage, NULL);
	p->bpf_ctx = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = security_task_alloc(p, clone_flags);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_security;
	retval = copy_files(clone_flags, p, args->no_files);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(p, args);
	if (retval)
		goto bad_fork_cleanup_io;

	stackleak_task_init(p);

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
				args->set_tid_size);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_thread;
		}
	}

	/*
	 * This has to happen after we've potentially unshared the file
	 * descriptor table (so that the pidfd doesn't leak into the child
	 * if the fd table isn't shared).
	 */
	if (clone_flags & CLONE_PIDFD) {
		int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0;

		/* Note that no task has been attached to @pid yet. */
		retval = __pidfd_prepare(pid, flags, &pidfile);
		if (retval < 0)
			goto bad_fork_free_pid;
		pidfd = retval;

		retval = put_user(pidfd, args->pidfd);
		if (retval)
			goto bad_fork_put_pidfd;
	}

#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
	futex_init_task(p);

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		sas_ss_reset(p);

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_task_syscall_work(p, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(p, SYSCALL_EMU);
#endif
	clear_tsk_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	p->task_works = NULL;
	clear_posix_cputimers_work(p);

#ifdef CONFIG_KRETPROBES
	p->kretprobe_instances.first = NULL;
#endif
#ifdef CONFIG_RETHOOK
	p->rethooks.first = NULL;
#endif

	/*
	 * Ensure that the cgroup subsystem policies allow the new process to be
	 * forked. It should be noted that the new process's css_set can be changed
	 * between here and cgroup_post_fork() if an organisation operation is in
	 * progress.
	 */
	retval = cgroup_can_fork(p, args);
	if (retval)
		goto bad_fork_put_pidfd;

	/*
	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
	 * the new task on the correct runqueue. All this *before* the task
	 * becomes visible.
	 *
	 * This isn't part of ->can_fork() because while the re-cloning is
	 * cgroup specific, it unconditionally needs to place the task on a
	 * runqueue.
	 */
	sched_cgroup_fork(p, args);

	/*
	 * From this point on we must avoid any synchronous user-space
	 * communication until we take the tasklist-lock. In particular, we do
	 * not want user-space to be able to predict the process start-time by
	 * stalling fork(2) after we recorded the start_time but before it is
	 * visible to the system.
	 */

	p->start_time = ktime_get_ns();
	p->start_boottime = ktime_get_boottime_ns();

	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
		if (clone_flags & CLONE_THREAD)
			p->exit_signal = -1;
		else
			p->exit_signal = current->group_leader->exit_signal;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
		p->exit_signal = args->exit_signal;
	}

	klp_copy_process(p);

	sched_core_fork(p);

	spin_lock(&current->sighand->siglock);

	rv_task_fork(p);

	rseq_fork(p, clone_flags);

	/* Don't start children in a dying pid namespace */
	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}

	/* Let kill terminate clone/fork in the middle */
	if (fatal_signal_pending(current)) {
		retval = -EINTR;
		goto bad_fork_cancel_cgroup;
	}

	/* No more failure paths after this point. */

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	init_task_pid_links(p);
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}
			p->signal->shared_pending.signal = delayed.signal;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			current->signal->quick_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}
	total_forks++;
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	if (pidfile)
		fd_install(pidfd, pidfile);

	proc_fork_connector(p);
	sched_post_fork(p);
	cgroup_post_fork(p, args);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);
	user_events_fork(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);

	return p;

bad_fork_cancel_cgroup:
	sched_core_free(p);
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
	}
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
#endif
bad_fork_cleanup_delayacct:
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	exit_creds(p);
bad_fork_free:
	WRITE_ONCE(p->__state, TASK_DEAD);
	exit_task_stack_account(p);
	put_task_stack(p);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
}

static inline void init_idle_pids(struct task_struct *idle)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
	}
}

static int idle_dummy(void *dummy)
{
	/* This function is never called */
	return 0;
}

struct task_struct * __init fork_idle(int cpu)
{
	struct task_struct *task;
	struct kernel_clone_args args = {
		.flags		= CLONE_VM,
		.fn		= &idle_dummy,
		.fn_arg		= NULL,
		.kthread	= 1,
		.idle		= 1,
	};

	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
	}

	return task;
}

/*
 * This is like kernel_clone(), but shaved down and tailored to just
 * creating io_uring workers. It returns a created task, or an error pointer.
 * The returned task is inactive, and the caller must fire it up through
 * wake_up_new_task(p). All signals are blocked in the created task.
 */
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.io_thread	= 1,
		.user_worker	= 1,
	};

	return copy_process(NULL, 0, node, &args);
}
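
/*
 * Illustrative caller sketch, not part of this file (io_worker_fn and
 * worker_data are hypothetical): per the comment above, the returned task
 * is inactive and must be started explicitly:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = create_io_thread(io_worker_fn, worker_data, NUMA_NO_NODE);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	// ... record tsk wherever the worker is tracked ...
 *	wake_up_new_task(tsk);
 */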

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
pid_t kernel_clone(struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	pid_t nr;

	/*
	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
	 * field in struct clone_args and it still doesn't make sense to have
	 * them both point at the same memory location. Performing this check
	 * here has the advantage that we don't need to have a separate helper
	 * to check for legacy clone().
	 */
	if ((clone_flags & CLONE_PIDFD) &&
	    (clone_flags & CLONE_PARENT_SETTID) &&
	    (args->pidfd == args->parent_tid))
		return -EINVAL;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if (args->exit_signal != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, args->parent_tid);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
		/* lock the task to synchronize with memcg migration */
		task_lock(p);
		lru_gen_add_mm(p->mm);
		task_unlock(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	return nr;
}

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
		    unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.name		= name,
		.kthread	= 1,
	};

	return kernel_clone(&args);
}

/*
 * Create a user mode thread.
 */
pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
	};

	return kernel_clone(&args);
}

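/*
 * Illustrative sketch, not part of this file (worker_fn is hypothetical):
 * kernel_thread() hands back a raw pid with no lifecycle management, which
 * is why most kernel code uses the managed kthread_create()/kthread_run()
 * interface built on top of this machinery instead:
 *
 *	static int worker_fn(void *arg)
 *	{
 *		// ... do work, then ...
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(worker_fn, NULL, "my-worker", SIGCHLD);
 *	if (pid < 0)
 *		return pid;
 */
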
#ifdef __ARCH_WANT_SYS_FORK
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	struct kernel_clone_args args = {
		.exit_signal = SIGCHLD,
	};

	return kernel_clone(&args);
#else
	/* can not support in nommu mode */
	return -EINVAL;
#endif
}
#endif

#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
	struct kernel_clone_args args = {
		.flags		= CLONE_VFORK | CLONE_VM,
		.exit_signal	= SIGCHLD,
	};

	return kernel_clone(&args);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 unsigned long, tls,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		unsigned long, tls)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 unsigned long, tls)
#endif
{
	struct kernel_clone_args args = {
		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
		.pidfd		= parent_tidptr,
		.child_tid	= child_tidptr,
		.parent_tid	= parent_tidptr,
		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
		.stack		= newsp,
		.tls		= tls,
	};

	return kernel_clone(&args);
}
#endif

#ifdef __ARCH_WANT_SYS_CLONE3

noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
					      struct clone_args __user *uargs,
					      size_t usize)
{
	int err;
	struct clone_args args;
	pid_t *kset_tid = kargs->set_tid;

	BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
		     CLONE_ARGS_SIZE_VER0);
	BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
		     CLONE_ARGS_SIZE_VER1);
	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
		     CLONE_ARGS_SIZE_VER2);
	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);

	if (unlikely(usize > PAGE_SIZE))
		return -E2BIG;
	if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
		return -EINVAL;

	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (err)
		return err;

	if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
		return -EINVAL;

	if (unlikely(!args.set_tid && args.set_tid_size > 0))
		return -EINVAL;

	if (unlikely(args.set_tid && args.set_tid_size == 0))
		return -EINVAL;

	/*
	 * Verify that higher 32bits of exit_signal are unset and that
	 * it is a valid signal
	 */
	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
		     !valid_signal(args.exit_signal)))
		return -EINVAL;

	if ((args.flags & CLONE_INTO_CGROUP) &&
	    (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
		return -EINVAL;

	*kargs = (struct kernel_clone_args){
		.flags		= args.flags,
		.pidfd		= u64_to_user_ptr(args.pidfd),
		.child_tid	= u64_to_user_ptr(args.child_tid),
		.parent_tid	= u64_to_user_ptr(args.parent_tid),
		.exit_signal	= args.exit_signal,
		.stack		= args.stack,
		.stack_size	= args.stack_size,
		.tls		= args.tls,
		.set_tid_size	= args.set_tid_size,
		.cgroup		= args.cgroup,
	};

	if (args.set_tid &&
	    copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
			   (kargs->set_tid_size * sizeof(pid_t))))
		return -EFAULT;

	kargs->set_tid = kset_tid;

	return 0;
}

/**
 * clone3_stack_valid - check and prepare stack
 * @kargs: kernel clone args
 *
 * Verify that the stack arguments userspace gave us are sane.
 * In addition, set the stack direction for userspace since it's easy for us to
 * determine.
 */
static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
{
	if (kargs->stack == 0) {
		if (kargs->stack_size > 0)
			return false;
	} else {
		if (kargs->stack_size == 0)
			return false;

		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
			return false;

#if !defined(CONFIG_STACK_GROWSUP)
		kargs->stack += kargs->stack_size;
#endif
	}

	return true;
}
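
/*
 * Illustrative userspace sketch, not part of this file (STACK_SIZE is a
 * hypothetical constant): unlike legacy clone(), clone3() takes the *lowest*
 * address of the stack plus its size; on stack-grows-down architectures the
 * kernel computes the initial stack pointer itself, as seen above:
 *
 *	void *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
 *
 *	struct clone_args args = {
 *		.flags = CLONE_VM | CLONE_FS | CLONE_FILES |
 *			 CLONE_SIGHAND | CLONE_THREAD,
 *		.stack = (__u64)(uintptr_t)stack,	// lowest address
 *		.stack_size = STACK_SIZE,
 *	};
 *	pid_t tid = syscall(__NR_clone3, &args, sizeof(args));
 */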

static bool clone3_args_valid(struct kernel_clone_args *kargs)
{
	/* Verify that no unknown flags are passed along. */
	if (kargs->flags &
	    ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
		return false;

	/*
	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
	 */
	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
		return false;

	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
	    (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
		return false;

	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
	    kargs->exit_signal)
		return false;

	if (!clone3_stack_valid(kargs))
		return false;

	return true;
}

/**
 * sys_clone3 - create a new process with specific properties
 * @uargs: argument structure
 * @size: size of @uargs
 *
 * clone3() is the extensible successor to clone()/clone2().
 * It takes a struct as argument that is versioned by its size.
 *
 * Return: On success, a positive PID for the child process.
 *         On error, a negative errno number.
 */
SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
{
	int err;

	struct kernel_clone_args kargs;
	pid_t set_tid[MAX_PID_NS_LEVEL];

	kargs.set_tid = set_tid;

	err = copy_clone_args_from_user(&kargs, uargs, size);
	if (err)
		return err;

	if (!clone3_args_valid(&kargs))
		return -EINVAL;

	return kernel_clone(&kargs);
}
#endif
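
/*
 * Illustrative userspace sketch, not part of this file: the size argument
 * versions the struct, so callers built against an older clone_args keep
 * working. Combined with CLONE_PIDFD the kernel returns a pollable handle
 * on the child before it ever runs (child_main() is hypothetical):
 *
 *	int pidfd = -1;
 *	struct clone_args args = {
 *		.flags = CLONE_PIDFD,
 *		.pidfd = (__u64)(uintptr_t)&pidfd,
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid_t pid = syscall(__NR_clone3, &args, sizeof(args));
 *	if (pid == 0)
 *		_exit(child_main());
 *	// parent: poll() the pidfd for exit, or pidfd_send_signal() it
 */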

void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
{
	struct task_struct *leader, *parent, *child;
	int res;

	read_lock(&tasklist_lock);
	leader = top = top->group_leader;
down:
	for_each_thread(leader, parent) {
		list_for_each_entry(child, &parent->children, sibling) {
			res = visitor(child, data);
			if (res) {
				if (res < 0)
					goto out;
				leader = child;
				goto down;
			}
up:
			;
		}
	}

	if (leader != top) {
		child = leader;
		parent = child->real_parent;
		leader = parent->group_leader;
		goto up;
	}
out:
	read_unlock(&tasklist_lock);
}
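
/*
 * Illustrative sketch, not part of this file (count_descendants is
 * hypothetical): the visitor's return value steers the walk above --
 * negative aborts it, zero skips that child's subtree, positive descends
 * into it:
 *
 *	static int count_descendants(struct task_struct *t, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 1;	// keep descending into this subtree
 *	}
 *
 *	int n = 0;
 *	walk_process_tree(current, count_descendants, &n);
 */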

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

static void sighand_ctor(void *data)
{
	struct sighand_struct *sighand = data;

	spin_lock_init(&sighand->siglock);
	init_waitqueue_head(&sighand->signalfd_wqh);
}

void __init mm_cache_init(void)
{
	unsigned int mm_size;

	/*
	 * The mm_cpumask is located at the end of mm_struct, and is
	 * dynamically sized based on the maximum CPU number this system
	 * can have, taking hotplug into account (nr_cpu_ids).
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);

	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
#ifdef CONFIG_PER_VMA_LOCK
	vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
#endif
	mmap_init();
	nsproxy_cache_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
				CLONE_NEWTIME))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
	       struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, max_fds, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, we must also unshare the thread
	 * group and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing the VM, we must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, we must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing the mount namespace, we must also unshare filesystem
	 * information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_cred) {
		err = set_cred_ucounts(new_cred);
		if (err)
			goto bad_unshare_cleanup_cred;
	}

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd)
			swap(current->files, new_fd);

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}
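
/*
 * Illustrative userspace sketch, not part of this file: a process can
 * detach pieces of shared state after the fact, e.g. get a private mount
 * namespace, which per ksys_unshare() above implicitly unshares fs_struct
 * as well (requires CAP_SYS_ADMIN):
 *
 *	if (unshare(CLONE_NEWNS) == 0) {
 *		// mount/umount calls no longer affect the old namespace
 *	}
 */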

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */

int unshare_files(void)
{
	struct task_struct *task = current;
	struct files_struct *old, *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
	if (error || !copy)
		return error;

	old = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
	return 0;
}

int sysctl_max_threads(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
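
/*
 * Illustrative usage, not part of this file: this handler backs the
 * kernel.threads-max sysctl; writes outside [1, MAX_THREADS] are rejected
 * by proc_dointvec_minmax() via the extra1/extra2 bounds above:
 *
 *	# cat /proc/sys/kernel/threads-max
 *	# echo 200000 > /proc/sys/kernel/threads-max
 */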