1 // SPDX-License-Identifier: GPL-2.0-only
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
50 #include <linux/memfd.h>
52 #include <linux/uaccess.h>
53 #include <asm/cacheflush.h>
55 #include <asm/mmu_context.h>
57 #define CREATE_TRACE_POINTS
58 #include <trace/events/mmap.h>
62 #ifndef arch_mmap_check
63 #define arch_mmap_check(addr, len, flags) (0)
66 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
67 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
68 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
69 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
71 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
72 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
73 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
74 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
77 static bool ignore_rlimit_data;
78 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
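/*
 * Illustrative usage (sketch, not part of the original source): as a
 * core_param(), ignore_rlimit_data is set on the kernel command line or,
 * given the 0644 permissions above, typically toggled at runtime via
 * /sys/module/kernel/parameters/ignore_rlimit_data, e.g.
 *
 *	ignore_rlimit_data=1		(boot command line)
 *	echo 1 > /sys/module/kernel/parameters/ignore_rlimit_data
 *
 * When set, may_expand_vm() below only warns, rather than failing, when a
 * data mapping would push VmData past RLIMIT_DATA.
 */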
80 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
81 void vma_set_page_prot(struct vm_area_struct *vma)
83 unsigned long vm_flags = vma->vm_flags;
84 pgprot_t vm_page_prot;
86 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
87 if (vma_wants_writenotify(vma, vm_page_prot)) {
88 vm_flags &= ~VM_SHARED;
89 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
91 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
92 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
96 * check_brk_limits() - Use platform-specific check of range & verify mlock
98 * @addr: The address to check
99 * @len: The size of the increase.
101 * Return: 0 on success.
103 static int check_brk_limits(unsigned long addr, unsigned long len)
105 unsigned long mapped_addr;
107 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
108 if (IS_ERR_VALUE(mapped_addr))
111 return mlock_future_ok(current->mm, current->mm->def_flags, len)
115 SYSCALL_DEFINE1(brk, unsigned long, brk)
117 unsigned long newbrk, oldbrk, origbrk;
118 struct mm_struct *mm = current->mm;
119 struct vm_area_struct *brkvma, *next = NULL;
120 unsigned long min_brk;
121 bool populate = false;
123 struct vma_iterator vmi;
125 if (mmap_write_lock_killable(mm))
130 #ifdef CONFIG_COMPAT_BRK
132 * CONFIG_COMPAT_BRK can still be overridden by setting
133 * randomize_va_space to 2, which will still cause mm->start_brk
134 * to be arbitrarily shifted
136 if (current->brk_randomized)
137 min_brk = mm->start_brk;
139 min_brk = mm->end_data;
141 min_brk = mm->start_brk;
147 * Check against rlimit here. If this check is done later after the test
148 * of oldbrk with newbrk, then it can escape the test and let the data
149 * segment grow beyond its set limit in the case where the limit is
150 * not page aligned -Ram Gupta
152 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
153 mm->end_data, mm->start_data))
156 newbrk = PAGE_ALIGN(brk);
157 oldbrk = PAGE_ALIGN(mm->brk);
158 if (oldbrk == newbrk) {
163 /* Always allow shrinking brk. */
164 if (brk <= mm->brk) {
165 /* Search one past newbrk */
166 vma_iter_init(&vmi, mm, newbrk);
167 brkvma = vma_find(&vmi, oldbrk);
168 if (!brkvma || brkvma->vm_start >= oldbrk)
169 goto out; /* mapping intersects with an existing non-brk vma. */
171 * mm->brk must be protected by write mmap_lock.
172 * do_vmi_align_munmap() will drop the lock on success, so
173 * update it before calling do_vmi_align_munmap().
176 if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
177 /* unlock = */ true))
180 goto success_unlocked;
183 if (check_brk_limits(oldbrk, newbrk - oldbrk))
187 * Only check if the next VMA is within the stack_guard_gap of the
190 vma_iter_init(&vmi, mm, oldbrk);
191 next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
192 if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
195 brkvma = vma_prev_limit(&vmi, mm->start_brk);
196 /* Ok, looks good - let it rip. */
197 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
201 if (mm->def_flags & VM_LOCKED)
205 mmap_write_unlock(mm);
207 userfaultfd_unmap_complete(mm, &uf);
209 mm_populate(oldbrk, newbrk - oldbrk);
214 mmap_write_unlock(mm);
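/*
 * Userspace view (illustrative sketch only): the syscall above backs the C
 * library's brk()/sbrk(). Growing the heap by one page is roughly
 *
 *	void *old = sbrk(0);			// current program break
 *	if (brk((char *)old + 4096) != 0)	// ask the kernel to raise it
 *		perror("brk");
 *
 * Shrinking requests take the "Always allow shrinking brk" path above;
 * growth goes through check_brk_limits() and do_brk_flags().
 */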
219 * If a hint addr is less than mmap_min_addr, change the hint to be as
220 * low as possible but still greater than mmap_min_addr.
222 static inline unsigned long round_hint_to_min(unsigned long hint)
225 if (((void *)hint != NULL) &&
226 (hint < mmap_min_addr))
227 return PAGE_ALIGN(mmap_min_addr);
231 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
234 unsigned long locked_pages, limit_pages;
236 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
239 locked_pages = bytes >> PAGE_SHIFT;
240 locked_pages += mm->locked_vm;
242 limit_pages = rlimit(RLIMIT_MEMLOCK);
243 limit_pages >>= PAGE_SHIFT;
245 return locked_pages <= limit_pages;
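/*
 * Worked example (illustrative, assuming a 4 KiB page size): with
 * RLIMIT_MEMLOCK set to 64 KiB, a request to lock 32 KiB succeeds only
 * while mm->locked_vm is at most 8 pages:
 *
 *	locked_pages = (32768 >> 12) + mm->locked_vm;	// 8 + locked_vm
 *	limit_pages  = 65536 >> 12;			// 16
 *	ok = locked_pages <= limit_pages;		// locked_vm <= 8
 *
 * Holders of CAP_IPC_LOCK bypass the limit entirely.
 */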
248 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
250 if (S_ISREG(inode->i_mode))
251 return MAX_LFS_FILESIZE;
253 if (S_ISBLK(inode->i_mode))
254 return MAX_LFS_FILESIZE;
256 if (S_ISSOCK(inode->i_mode))
257 return MAX_LFS_FILESIZE;
259 /* Special "we do even unsigned file positions" case */
260 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
263 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
267 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
268 unsigned long pgoff, unsigned long len)
270 u64 maxsize = file_mmap_size_max(file, inode);
272 if (maxsize && len > maxsize)
275 if (pgoff > maxsize >> PAGE_SHIFT)
281 * do_mmap() - Perform a userland memory mapping into the current process
282 * address space of length @len with protection bits @prot, mmap flags @flags
283 * (from which VMA flags will be inferred), and any additional VMA flags to
284 * apply @vm_flags. If this is a file-backed mapping then the file is specified
285 * in @file and page offset into the file via @pgoff.
287 * This function does not perform security checks on the file and assumes, if
288 * @uf is non-NULL, the caller has provided a list head to track unmap events
289 * for userfaultfd @uf.
291 * It also simply indicates whether memory population is required by setting
292 * @populate, which must be non-NULL, expecting the caller to actually perform
293 * this task itself if appropriate.
295 * This function will invoke architecture-specific (and if provided and
296 * relevant, file system-specific) logic to determine the most appropriate
297 * unmapped area in which to place the mapping if not MAP_FIXED.
299 * Callers which require userland mmap() behaviour should invoke vm_mmap(),
300 * which is also exported for module use.
302 * Callers which require this behaviour minus the security checks, userfaultfd
303 * and populate behaviour, and which handle the mmap write lock themselves,
304 * should call this function.
306 * Note that the returned address may reside within a merged VMA if an
307 * appropriate merge were to take place, so it doesn't necessarily specify the
308 * start of a VMA, rather only the start of a valid mapped range of length
309 * @len bytes, rounded down to the nearest page size.
311 * The caller must write-lock current->mm->mmap_lock.
313 * @file: An optional struct file pointer describing the file which is to be
314 * mapped, if a file-backed mapping.
315 * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the
316 * address at which to perform this mapping. See mmap (2) for details. Must be
318 * @len: The length of the mapping. Will be page-aligned and must be at least 1
320 * @prot: Protection bits describing access required to the mapping. See mmap
322 * @flags: Flags specifying how the mapping should be performed, see mmap (2)
324 * @vm_flags: VMA flags which should be set by default, or 0 otherwise.
325 * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise.
326 * @populate: A pointer to a value which will be set to 0 if no population of
327 * the range is required, or the number of bytes to populate if it is. Must be
328 * non-NULL. See mmap (2) for details as to under what circumstances population
329 * of the range occurs.
330 * @uf: An optional pointer to a list head to track userfaultfd unmap events
331 * should unmapping events arise. If provided, it is up to the caller to manage
334 * Returns: Either an error, or the address at which the requested mapping has
337 unsigned long do_mmap(struct file *file, unsigned long addr,
338 unsigned long len, unsigned long prot,
339 unsigned long flags, vm_flags_t vm_flags,
340 unsigned long pgoff, unsigned long *populate,
341 struct list_head *uf)
343 struct mm_struct *mm = current->mm;
348 mmap_assert_write_locked(mm);
354 * Does the application expect PROT_READ to imply PROT_EXEC?
356 * (the exception is when the underlying filesystem is noexec
357 * mounted, in which case we don't add PROT_EXEC.)
359 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
360 if (!(file && path_noexec(&file->f_path)))
363 /* force arch specific MAP_FIXED handling in get_unmapped_area */
364 if (flags & MAP_FIXED_NOREPLACE)
367 if (!(flags & MAP_FIXED))
368 addr = round_hint_to_min(addr);
370 /* Careful about overflows.. */
371 len = PAGE_ALIGN(len);
375 /* offset overflow? */
376 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
379 /* Too many mappings? */
380 if (mm->map_count > sysctl_max_map_count)
384 * addr is returned from get_unmapped_area.
385 * There are two cases:
386 * 1> MAP_FIXED == false
387 * unallocated memory, no need to check sealing.
388 * 2> MAP_FIXED == true
389 * sealing is checked inside mmap_region when
390 * do_vmi_munmap is called.
393 if (prot == PROT_EXEC) {
394 pkey = execute_only_pkey(mm);
399 /* Do simple checking here so the lower-level routines won't have
400 * to. We assume access permissions have been handled by the open
401 * of the memory object, so we don't do any here.
403 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
404 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
406 /* Obtain the address to map to. We verify (or select) it and ensure
407 * that it represents a valid section of the address space.
409 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
410 if (IS_ERR_VALUE(addr))
413 if (flags & MAP_FIXED_NOREPLACE) {
414 if (find_vma_intersection(mm, addr, addr + len))
418 if (flags & MAP_LOCKED)
422 if (!mlock_future_ok(mm, vm_flags, len))
426 struct inode *inode = file_inode(file);
427 unsigned long flags_mask;
430 if (!file_mmap_ok(file, inode, pgoff, len))
433 flags_mask = LEGACY_MAP_MASK;
434 if (file->f_op->fop_flags & FOP_MMAP_SYNC)
435 flags_mask |= MAP_SYNC;
437 switch (flags & MAP_TYPE) {
440 * Force use of MAP_SHARED_VALIDATE with non-legacy
441 * flags. E.g. MAP_SYNC is dangerous to use with
442 * MAP_SHARED as you don't know which consistency model
443 * you will get. We silently ignore unsupported flags
444 * with MAP_SHARED to preserve backward compatibility.
446 flags &= LEGACY_MAP_MASK;
448 case MAP_SHARED_VALIDATE:
449 if (flags & ~flags_mask)
451 if (prot & PROT_WRITE) {
452 if (!(file->f_mode & FMODE_WRITE))
454 if (IS_SWAPFILE(file->f_mapping->host))
459 * Make sure we don't allow writing to an append-only
462 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
465 vm_flags |= VM_SHARED | VM_MAYSHARE;
466 if (!(file->f_mode & FMODE_WRITE))
467 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
470 if (!(file->f_mode & FMODE_READ))
472 if (path_noexec(&file->f_path)) {
473 if (vm_flags & VM_EXEC)
475 vm_flags &= ~VM_MAYEXEC;
478 if (!file_has_valid_mmap_hooks(file))
480 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
489 * Check to see if we are violating any seals and update VMA
490 * flags if necessary to avoid future seal violations.
492 err = memfd_check_seals_mmap(file, &vm_flags);
494 return (unsigned long)err;
496 switch (flags & MAP_TYPE) {
498 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
504 vm_flags |= VM_SHARED | VM_MAYSHARE;
507 if (VM_DROPPABLE == VM_NONE)
510 * It makes no sense for a locked or stack area to be droppable.
512 * Also, since droppable pages can just go away at any time
513 * it makes no sense to copy them on fork or dump them.
515 * And don't attempt to combine with hugetlb for now.
517 if (flags & (MAP_LOCKED | MAP_HUGETLB))
519 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
522 vm_flags |= VM_DROPPABLE;
525 * If the pages can be dropped, then it doesn't make
526 * sense to reserve them.
528 vm_flags |= VM_NORESERVE;
531 * Likewise, they're volatile enough that they
532 * shouldn't survive forks or coredumps.
534 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
538 * Set pgoff according to addr for anon_vma.
540 pgoff = addr >> PAGE_SHIFT;
548 * Set 'VM_NORESERVE' if we should not account for the
549 * memory use of this mapping.
551 if (flags & MAP_NORESERVE) {
552 /* We honor MAP_NORESERVE if allowed to overcommit */
553 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
554 vm_flags |= VM_NORESERVE;
556 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
557 if (file && is_file_hugepages(file))
558 vm_flags |= VM_NORESERVE;
561 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
562 if (!IS_ERR_VALUE(addr) &&
563 ((vm_flags & VM_LOCKED) ||
564 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
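/*
 * Illustrative note (not part of the original source): from userspace the
 * populate decision above corresponds to calls such as
 *
 *	p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
 *
 * do_mmap() reports the byte count back through *populate and the caller
 * (e.g. vm_mmap_pgoff()) then runs mm_populate() itself, unless
 * MAP_NONBLOCK was also passed or the mapping failed.
 */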
569 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
570 unsigned long prot, unsigned long flags,
571 unsigned long fd, unsigned long pgoff)
573 struct file *file = NULL;
574 unsigned long retval;
576 if (!(flags & MAP_ANONYMOUS)) {
577 audit_mmap_fd(fd, flags);
581 if (is_file_hugepages(file)) {
582 len = ALIGN(len, huge_page_size(hstate_file(file)));
583 } else if (unlikely(flags & MAP_HUGETLB)) {
587 } else if (flags & MAP_HUGETLB) {
590 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
594 len = ALIGN(len, huge_page_size(hs));
596 * VM_NORESERVE is used because the reservations will be
597 * taken when vm_ops->mmap() is called
599 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
601 HUGETLB_ANONHUGE_INODE,
602 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
604 return PTR_ERR(file);
607 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
614 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
615 unsigned long, prot, unsigned long, flags,
616 unsigned long, fd, unsigned long, pgoff)
618 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
621 #ifdef __ARCH_WANT_SYS_OLD_MMAP
622 struct mmap_arg_struct {
628 unsigned long offset;
631 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
633 struct mmap_arg_struct a;
635 if (copy_from_user(&a, arg, sizeof(a)))
637 if (offset_in_page(a.offset))
640 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
641 a.offset >> PAGE_SHIFT);
643 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
646 * Determine if the allocation needs to ensure that there is no
647 * existing mapping within its guard gaps, for use as start_gap.
649 static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
651 if (vm_flags & VM_SHADOW_STACK)
658 * Search for an unmapped address range.
660 * We are looking for a range that:
661 * - does not intersect with any VMA;
662 * - is contained within the [low_limit, high_limit) interval;
663 * - is at least the desired size;
664 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
666 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
670 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
671 addr = unmapped_area_topdown(info);
673 addr = unmapped_area(info);
675 trace_vm_unmapped_area(addr, info);
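/*
 * Illustrative sketch (hypothetical caller, assuming 2 MiB PMDs): a caller
 * that wants a PMD-aligned range fills the request as
 *
 *	struct vm_unmapped_area_info info = {
 *		.length		= len,
 *		.low_limit	= mm->mmap_base,
 *		.high_limit	= TASK_SIZE,
 *		.align_mask	= PMD_SIZE - 1,
 *		.align_offset	= 0,
 *	};
 *	addr = vm_unmapped_area(&info);
 *
 * Any successfully returned addr then satisfies
 * (addr & (PMD_SIZE - 1)) == 0.
 */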
679 /* Get an address range which is currently unmapped.
680 * For shmat() with addr=0.
682 * Ugly calling convention alert:
683 * Return value with the low bits set means error value,
685 * if (ret & ~PAGE_MASK)
688 * This function "knows" that -ENOMEM has the bits set.
691 generic_get_unmapped_area(struct file *filp, unsigned long addr,
692 unsigned long len, unsigned long pgoff,
693 unsigned long flags, vm_flags_t vm_flags)
695 struct mm_struct *mm = current->mm;
696 struct vm_area_struct *vma, *prev;
697 struct vm_unmapped_area_info info = {};
698 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
700 if (len > mmap_end - mmap_min_addr)
703 if (flags & MAP_FIXED)
707 addr = PAGE_ALIGN(addr);
708 vma = find_vma_prev(mm, addr, &prev);
709 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
710 (!vma || addr + len <= vm_start_gap(vma)) &&
711 (!prev || addr >= vm_end_gap(prev)))
716 info.low_limit = mm->mmap_base;
717 info.high_limit = mmap_end;
718 info.start_gap = stack_guard_placement(vm_flags);
719 if (filp && is_file_hugepages(filp))
720 info.align_mask = huge_page_mask_align(filp);
721 return vm_unmapped_area(&info);
724 #ifndef HAVE_ARCH_UNMAPPED_AREA
726 arch_get_unmapped_area(struct file *filp, unsigned long addr,
727 unsigned long len, unsigned long pgoff,
728 unsigned long flags, vm_flags_t vm_flags)
730 return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
736 * This mmap-allocator allocates new areas top-down from below the
737 * stack's low limit (the base):
740 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
741 unsigned long len, unsigned long pgoff,
742 unsigned long flags, vm_flags_t vm_flags)
744 struct vm_area_struct *vma, *prev;
745 struct mm_struct *mm = current->mm;
746 struct vm_unmapped_area_info info = {};
747 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
749 /* requested length too big for entire address space */
750 if (len > mmap_end - mmap_min_addr)
753 if (flags & MAP_FIXED)
756 /* requesting a specific address */
758 addr = PAGE_ALIGN(addr);
759 vma = find_vma_prev(mm, addr, &prev);
760 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
761 (!vma || addr + len <= vm_start_gap(vma)) &&
762 (!prev || addr >= vm_end_gap(prev)))
766 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
768 info.low_limit = PAGE_SIZE;
769 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
770 info.start_gap = stack_guard_placement(vm_flags);
771 if (filp && is_file_hugepages(filp))
772 info.align_mask = huge_page_mask_align(filp);
773 addr = vm_unmapped_area(&info);
776 * A failed mmap() very likely causes application failure,
777 * so fall back to the bottom-up function here. This scenario
778 * can happen with large stack limits and large mmap()
781 if (offset_in_page(addr)) {
782 VM_BUG_ON(addr != -ENOMEM);
784 info.low_limit = TASK_UNMAPPED_BASE;
785 info.high_limit = mmap_end;
786 addr = vm_unmapped_area(&info);
792 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
794 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
795 unsigned long len, unsigned long pgoff,
796 unsigned long flags, vm_flags_t vm_flags)
798 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
803 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
804 unsigned long addr, unsigned long len,
805 unsigned long pgoff, unsigned long flags,
808 if (test_bit(MMF_TOPDOWN, &mm->flags))
809 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
811 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
815 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
816 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
818 unsigned long (*get_area)(struct file *, unsigned long,
819 unsigned long, unsigned long, unsigned long)
822 unsigned long error = arch_mmap_check(addr, len, flags);
826 /* Careful about overflows.. */
831 if (file->f_op->get_unmapped_area)
832 get_area = file->f_op->get_unmapped_area;
833 } else if (flags & MAP_SHARED) {
835 * mmap_region() will call shmem_zero_setup() to create a file,
836 * so use shmem's get_unmapped_area in case it can be huge.
838 get_area = shmem_get_unmapped_area;
841 /* Always treat pgoff as zero for anonymous memory. */
846 addr = get_area(file, addr, len, pgoff, flags);
847 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
848 && !addr /* no hint */
849 && IS_ALIGNED(len, PMD_SIZE)) {
850 /* Ensures that larger anonymous mappings are THP aligned. */
851 addr = thp_get_unmapped_area_vmflags(file, addr, len,
852 pgoff, flags, vm_flags);
854 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
855 pgoff, flags, vm_flags);
857 if (IS_ERR_VALUE(addr))
860 if (addr > TASK_SIZE - len)
862 if (offset_in_page(addr))
865 error = security_mmap_addr(addr);
866 return error ? error : addr;
870 mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
871 unsigned long addr, unsigned long len,
872 unsigned long pgoff, unsigned long flags)
874 if (test_bit(MMF_TOPDOWN, &mm->flags))
875 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
876 return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
878 EXPORT_SYMBOL(mm_get_unmapped_area);
881 * find_vma_intersection() - Look up the first VMA which intersects the interval
882 * @mm: The process address space.
883 * @start_addr: The inclusive start user address.
884 * @end_addr: The exclusive end user address.
886 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
887 * start_addr < end_addr.
889 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
890 unsigned long start_addr,
891 unsigned long end_addr)
893 unsigned long index = start_addr;
895 mmap_assert_locked(mm);
896 return mt_find(&mm->mm_mt, &index, end_addr - 1);
898 EXPORT_SYMBOL(find_vma_intersection);
901 * find_vma() - Find the VMA for a given address, or the next VMA.
902 * @mm: The mm_struct to check
905 * Returns: The VMA associated with addr, or the next VMA.
906 * May return %NULL in the case of no VMA at addr or above.
908 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
910 unsigned long index = addr;
912 mmap_assert_locked(mm);
913 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
915 EXPORT_SYMBOL(find_vma);
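/*
 * Illustrative note (not from the original source): because find_vma()
 * returns the *next* VMA when @addr itself is unmapped, callers needing an
 * exact containment check pair it with a start test:
 *
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		;	// addr lies inside vma
 *
 * vma_lookup() wraps exactly this pattern and returns NULL otherwise.
 */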
918 * find_vma_prev() - Find the VMA for a given address, or the next vma and
919 * set %pprev to the previous VMA, if any.
920 * @mm: The mm_struct to check
922 * @pprev: The pointer to set to the previous VMA
924 * Note that RCU lock is missing here since the external mmap_lock() is used
927 * Returns: The VMA associated with @addr, or the next vma.
928 * May return %NULL in the case of no vma at addr or above.
930 struct vm_area_struct *
931 find_vma_prev(struct mm_struct *mm, unsigned long addr,
932 struct vm_area_struct **pprev)
934 struct vm_area_struct *vma;
935 VMA_ITERATOR(vmi, mm, addr);
937 vma = vma_iter_load(&vmi);
938 *pprev = vma_prev(&vmi);
940 vma = vma_next(&vmi);
944 /* enforced gap between the expanding stack and other mappings. */
945 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
947 static int __init cmdline_parse_stack_guard_gap(char *p)
952 val = simple_strtoul(p, &endptr, 10);
954 stack_guard_gap = val << PAGE_SHIFT;
958 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
960 #ifdef CONFIG_STACK_GROWSUP
961 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
963 return expand_upwards(vma, address);
966 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
968 struct vm_area_struct *vma, *prev;
971 vma = find_vma_prev(mm, addr, &prev);
972 if (vma && (vma->vm_start <= addr))
976 if (expand_stack_locked(prev, addr))
978 if (prev->vm_flags & VM_LOCKED)
979 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
983 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
985 return expand_downwards(vma, address);
988 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
990 struct vm_area_struct *vma;
994 vma = find_vma(mm, addr);
997 if (vma->vm_start <= addr)
999 start = vma->vm_start;
1000 if (expand_stack_locked(vma, addr))
1002 if (vma->vm_flags & VM_LOCKED)
1003 populate_vma_page_range(vma, addr, start, NULL);
1008 #if defined(CONFIG_STACK_GROWSUP)
1010 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
1011 #define vma_expand_down(vma, addr) (-EFAULT)
1015 #define vma_expand_up(vma,addr) (-EFAULT)
1016 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1021 * expand_stack(): legacy interface for page faulting. Don't use unless
1024 * This is called with the mm locked for reading, drops the lock, takes
1025 * the lock for writing, tries to look up a vma again, expands it if
1026 * necessary, and downgrades the lock to reading again.
1028 * If no vma is found or it can't be expanded, it returns NULL and has
1031 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1033 struct vm_area_struct *vma, *prev;
1035 mmap_read_unlock(mm);
1036 if (mmap_write_lock_killable(mm))
1039 vma = find_vma_prev(mm, addr, &prev);
1040 if (vma && vma->vm_start <= addr)
1043 if (prev && !vma_expand_up(prev, addr)) {
1048 if (vma && !vma_expand_down(vma, addr))
1051 mmap_write_unlock(mm);
1055 mmap_write_downgrade(mm);
1059 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1060 * @mm: The mm_struct
1061 * @start: The start address to munmap
1062 * @len: The length to be munmapped.
1063 * @uf: The userfaultfd list_head
1065 * Return: 0 on success, error otherwise.
1067 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1068 struct list_head *uf)
1070 VMA_ITERATOR(vmi, mm, start);
1072 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1075 int vm_munmap(unsigned long start, size_t len)
1077 return __vm_munmap(start, len, false);
1079 EXPORT_SYMBOL(vm_munmap);
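/*
 * Illustrative sketch of in-kernel usage (hypothetical caller): code that
 * mapped user memory with vm_mmap() can tear the range down without taking
 * mmap_lock itself:
 *
 *	unsigned long uaddr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
 *				      MAP_SHARED, 0);
 *	...
 *	if (!IS_ERR_VALUE(uaddr))
 *		vm_munmap(uaddr, size);
 *
 * __vm_munmap() takes the mmap write lock internally and calls
 * do_vmi_munmap().
 */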
1081 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1083 addr = untagged_addr(addr);
1084 return __vm_munmap(addr, len, true);
1089 * Emulation of deprecated remap_file_pages() syscall.
1091 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1092 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1095 struct mm_struct *mm = current->mm;
1096 struct vm_area_struct *vma;
1097 unsigned long populate = 0;
1098 unsigned long ret = -EINVAL;
1100 vm_flags_t vm_flags;
1102 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1103 current->comm, current->pid);
1107 start = start & PAGE_MASK;
1108 size = size & PAGE_MASK;
1110 if (start + size <= start)
1113 /* Does pgoff wrap? */
1114 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1117 if (mmap_read_lock_killable(mm))
1121 * Look up VMA under read lock first so we can perform the security check
1122 * without holding locks (which can be problematic). We reacquire a
1123 * write lock later and check nothing changed underneath us.
1125 vma = vma_lookup(mm, start);
1127 if (!vma || !(vma->vm_flags & VM_SHARED)) {
1128 mmap_read_unlock(mm);
1132 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1133 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1134 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1136 flags &= MAP_NONBLOCK;
1137 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1138 if (vma->vm_flags & VM_LOCKED)
1139 flags |= MAP_LOCKED;
1141 /* Save vm_flags used to calculate prot and flags, and recheck later. */
1142 vm_flags = vma->vm_flags;
1143 file = get_file(vma->vm_file);
1145 mmap_read_unlock(mm);
1147 /* Call outside mmap_lock to be consistent with other callers. */
1148 ret = security_mmap_file(file, prot, flags);
1156 /* OK security check passed, take write lock + let it rip. */
1157 if (mmap_write_lock_killable(mm)) {
1162 vma = vma_lookup(mm, start);
1167 /* Make sure things didn't change under us. */
1168 if (vma->vm_flags != vm_flags)
1170 if (vma->vm_file != file)
1173 if (start + size > vma->vm_end) {
1174 VMA_ITERATOR(vmi, mm, vma->vm_end);
1175 struct vm_area_struct *next, *prev = vma;
1177 for_each_vma_range(vmi, next, start + size) {
1178 /* hole between vmas ? */
1179 if (next->vm_start != prev->vm_end)
1182 if (next->vm_file != vma->vm_file)
1185 if (next->vm_flags != vma->vm_flags)
1188 if (start + size <= next->vm_end)
1198 ret = do_mmap(vma->vm_file, start, size,
1199 prot, flags, 0, pgoff, &populate, NULL);
1201 mmap_write_unlock(mm);
1204 mm_populate(ret, populate);
1205 if (!IS_ERR_VALUE(ret))
1210 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
1212 struct mm_struct *mm = current->mm;
1213 struct vm_area_struct *vma = NULL;
1218 VMA_ITERATOR(vmi, mm, addr);
1220 len = PAGE_ALIGN(request);
1226 /* Until we need other flags, refuse anything except VM_EXEC. */
1227 if ((flags & (~VM_EXEC)) != 0)
1230 if (mmap_write_lock_killable(mm))
1233 ret = check_brk_limits(addr, len);
1237 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1241 vma = vma_prev(&vmi);
1242 ret = do_brk_flags(&vmi, vma, addr, len, flags);
1243 populate = ((mm->def_flags & VM_LOCKED) != 0);
1244 mmap_write_unlock(mm);
1245 userfaultfd_unmap_complete(mm, &uf);
1246 if (populate && !ret)
1247 mm_populate(addr, len);
1252 mmap_write_unlock(mm);
1255 EXPORT_SYMBOL(vm_brk_flags);
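/*
 * Illustrative sketch (hypothetical caller, in the style of binfmt
 * loaders): vm_brk_flags() acts as an in-kernel brk() for a single,
 * page-aligned range, e.g. extending a zero-initialised bss segment:
 *
 *	error = vm_brk_flags(bss_start, bss_end - bss_start,
 *			     prot & PROT_EXEC ? VM_EXEC : 0);
 *
 * where bss_start/bss_end are page-aligned addresses chosen by the caller.
 */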
1257 /* Release all mmaps. */
1258 void exit_mmap(struct mm_struct *mm)
1260 struct mmu_gather tlb;
1261 struct vm_area_struct *vma;
1262 unsigned long nr_accounted = 0;
1263 VMA_ITERATOR(vmi, mm, 0);
1266 /* mm's last user has gone, and it's about to be pulled down */
1267 mmu_notifier_release(mm);
1272 vma = vma_next(&vmi);
1273 if (!vma || unlikely(xa_is_zero(vma))) {
1274 /* Can happen if dup_mmap() received an OOM */
1275 mmap_read_unlock(mm);
1276 mmap_write_lock(mm);
1281 tlb_gather_mmu_fullmm(&tlb, mm);
1282 /* update_hiwater_rss(mm) here? but nobody should be looking */
1283 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1284 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1285 mmap_read_unlock(mm);
1288 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1289 * because the memory has already been freed.
1291 set_bit(MMF_OOM_SKIP, &mm->flags);
1292 mmap_write_lock(mm);
1293 mt_clear_in_rcu(&mm->mm_mt);
1294 vma_iter_set(&vmi, vma->vm_end);
1295 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1296 USER_PGTABLES_CEILING, true);
1297 tlb_finish_mmu(&tlb);
1300 * Walk the list again, actually closing and freeing it, with preemption
1301 * enabled, without holding any MM locks besides the unreachable
1304 vma_iter_set(&vmi, vma->vm_end);
1306 if (vma->vm_flags & VM_ACCOUNT)
1307 nr_accounted += vma_pages(vma);
1308 vma_mark_detached(vma);
1312 vma = vma_next(&vmi);
1313 } while (vma && likely(!xa_is_zero(vma)));
1315 BUG_ON(count != mm->map_count);
1317 trace_exit_mmap(mm);
1319 __mt_destroy(&mm->mm_mt);
1320 mmap_write_unlock(mm);
1321 vm_unacct_memory(nr_accounted);
1325 * Return true if the calling process may expand its vm space by the passed
1328 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
1330 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
1333 if (is_data_mapping(flags) &&
1334 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
1335 /* Workaround for Valgrind */
1336 if (rlimit(RLIMIT_DATA) == 0 &&
1337 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
1340 pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
1341 current->comm, current->pid,
1342 (mm->data_vm + npages) << PAGE_SHIFT,
1343 rlimit(RLIMIT_DATA),
1344 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
1346 if (!ignore_rlimit_data)
1353 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
1355 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
1357 if (is_exec_mapping(flags))
1358 mm->exec_vm += npages;
1359 else if (is_stack_mapping(flags))
1360 mm->stack_vm += npages;
1361 else if (is_data_mapping(flags))
1362 mm->data_vm += npages;
1365 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
1368 * Close hook, called for unmap() and on the old vma for mremap().
1370 * Having a close hook prevents vma merging regardless of flags.
1372 static void special_mapping_close(struct vm_area_struct *vma)
1374 const struct vm_special_mapping *sm = vma->vm_private_data;
1380 static const char *special_mapping_name(struct vm_area_struct *vma)
1382 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
1385 static int special_mapping_mremap(struct vm_area_struct *new_vma)
1387 struct vm_special_mapping *sm = new_vma->vm_private_data;
1389 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
1393 return sm->mremap(sm, new_vma);
1398 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
1401 * Forbid splitting special mappings - kernel has expectations over
1402 * the number of pages in mapping. Together with VM_DONTEXPAND
1403 * the size of vma should stay the same over the special mapping's
1409 static const struct vm_operations_struct special_mapping_vmops = {
1410 .close = special_mapping_close,
1411 .fault = special_mapping_fault,
1412 .mremap = special_mapping_mremap,
1413 .name = special_mapping_name,
1414 /* vDSO code relies on VVAR not being accessible remotely */
1416 .may_split = special_mapping_split,
1419 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
1421 struct vm_area_struct *vma = vmf->vma;
1423 struct page **pages;
1424 struct vm_special_mapping *sm = vma->vm_private_data;
1427 return sm->fault(sm, vmf->vma, vmf);
1431 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
1435 struct page *page = *pages;
1441 return VM_FAULT_SIGBUS;
1444 static struct vm_area_struct *__install_special_mapping(
1445 struct mm_struct *mm,
1446 unsigned long addr, unsigned long len,
1447 unsigned long vm_flags, void *priv,
1448 const struct vm_operations_struct *ops)
1451 struct vm_area_struct *vma;
1453 vma = vm_area_alloc(mm);
1454 if (unlikely(vma == NULL))
1455 return ERR_PTR(-ENOMEM);
1457 vma_set_range(vma, addr, addr + len, 0);
1458 vm_flags_init(vma, (vm_flags | mm->def_flags |
1459 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
1460 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1463 vma->vm_private_data = priv;
1465 ret = insert_vm_struct(mm, vma);
1469 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
1471 perf_event_mmap(vma);
1477 return ERR_PTR(ret);
1480 bool vma_is_special_mapping(const struct vm_area_struct *vma,
1481 const struct vm_special_mapping *sm)
1483 return vma->vm_private_data == sm &&
1484 vma->vm_ops == &special_mapping_vmops;
1488 * Called with mm->mmap_lock held for writing.
1489 * Insert a new vma covering the given region, with the given flags.
1490 * Its pages are supplied by the given array of struct page *.
1491 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
1492 * The region past the last page supplied will always produce SIGBUS.
1493 * The array pointer and the pages it points to are assumed to stay alive
1494 * for as long as this mapping might exist.
1496 struct vm_area_struct *_install_special_mapping(
1497 struct mm_struct *mm,
1498 unsigned long addr, unsigned long len,
1499 unsigned long vm_flags, const struct vm_special_mapping *spec)
1501 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
1502 &special_mapping_vmops);
1505 #ifdef CONFIG_SYSCTL
1506 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
1507 defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
1508 int sysctl_legacy_va_layout;
1511 static const struct ctl_table mmap_table[] = {
1513 .procname = "max_map_count",
1514 .data = &sysctl_max_map_count,
1515 .maxlen = sizeof(sysctl_max_map_count),
1517 .proc_handler = proc_dointvec_minmax,
1518 .extra1 = SYSCTL_ZERO,
1520 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
1521 defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT)
1523 .procname = "legacy_va_layout",
1524 .data = &sysctl_legacy_va_layout,
1525 .maxlen = sizeof(sysctl_legacy_va_layout),
1527 .proc_handler = proc_dointvec_minmax,
1528 .extra1 = SYSCTL_ZERO,
1531 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
1533 .procname = "mmap_rnd_bits",
1534 .data = &mmap_rnd_bits,
1535 .maxlen = sizeof(mmap_rnd_bits),
1537 .proc_handler = proc_dointvec_minmax,
1538 .extra1 = (void *)&mmap_rnd_bits_min,
1539 .extra2 = (void *)&mmap_rnd_bits_max,
1542 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
1544 .procname = "mmap_rnd_compat_bits",
1545 .data = &mmap_rnd_compat_bits,
1546 .maxlen = sizeof(mmap_rnd_compat_bits),
1548 .proc_handler = proc_dointvec_minmax,
1549 .extra1 = (void *)&mmap_rnd_compat_bits_min,
1550 .extra2 = (void *)&mmap_rnd_compat_bits_max,
1554 #endif /* CONFIG_SYSCTL */
1557 * initialise the percpu counter for VM, initialise VMA state.
1559 void __init mmap_init(void)
1563 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
1565 #ifdef CONFIG_SYSCTL
1566 register_sysctl_init("vm", mmap_table);
1572 * Initialise sysctl_user_reserve_kbytes.
1574 * This is intended to prevent a user from starting a single memory hogging
1575 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1578 * The default value is min(3% of free memory, 128MB)
1579 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1581 static int init_user_reserve(void)
1583 unsigned long free_kbytes;
1585 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1587 sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
1590 subsys_initcall(init_user_reserve);
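/*
 * Worked example (illustrative): free_kbytes / 32 is roughly 3% of free
 * memory. With 2 GiB free (free_kbytes ~= 2097152) the reserve becomes
 * ~65536 kB (64 MiB); with 8 GiB free the SZ_128K cap (131072 kB, i.e.
 * 128 MiB) applies instead.
 */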
1593 * Initialise sysctl_admin_reserve_kbytes.
1595 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1596 * to log in and kill a memory hogging process.
1598 * Systems with more than 256MB will reserve 8MB, enough to recover
1599 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1600 * only reserve 3% of free pages by default.
1602 static int init_admin_reserve(void)
1604 unsigned long free_kbytes;
1606 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1608 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
1611 subsys_initcall(init_admin_reserve);
1614 * Reinitialise user and admin reserves if memory is added or removed.
1616 * The default user reserve max is 128MB, and the default max for the
1617 * admin reserve is 8MB. These are usually, but not always, enough to
1618 * enable recovery from a memory hogging process using login/sshd, a shell,
1619 * and tools like top. It may make sense to increase or even disable the
1620 * reserve depending on the existence of swap or variations in the recovery
1621 * tools. So, the admin may have changed them.
1623 * If memory is added and the reserves have been eliminated or increased above
1624 * the default max, then we'll trust the admin.
1626 * If memory is removed and there isn't enough free memory, then we
1627 * need to reset the reserves.
1629 * Otherwise keep the reserve set by the admin.
1631 static int reserve_mem_notifier(struct notifier_block *nb,
1632 unsigned long action, void *data)
1634 unsigned long tmp, free_kbytes;
1638 /* Default max is 128MB. Leave alone if modified by operator. */
1639 tmp = sysctl_user_reserve_kbytes;
1640 if (tmp > 0 && tmp < SZ_128K)
1641 init_user_reserve();
1643 /* Default max is 8MB. Leave alone if modified by operator. */
1644 tmp = sysctl_admin_reserve_kbytes;
1645 if (tmp > 0 && tmp < SZ_8K)
1646 init_admin_reserve();
1650 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1652 if (sysctl_user_reserve_kbytes > free_kbytes) {
1653 init_user_reserve();
1654 pr_info("vm.user_reserve_kbytes reset to %lu\n",
1655 sysctl_user_reserve_kbytes);
1658 if (sysctl_admin_reserve_kbytes > free_kbytes) {
1659 init_admin_reserve();
1660 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
1661 sysctl_admin_reserve_kbytes);
1670 static int __meminit init_reserve_notifier(void)
1672 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
1673 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
1677 subsys_initcall(init_reserve_notifier);
1680 * Obtain a read lock on mm->mmap_lock, if the specified address is below the
1681 * start of the VMA, the intent is to perform a write, and it is a
1682 * downward-growing stack, then attempt to expand the stack to contain it.
1684 * This function is intended only for obtaining an argument page from an ELF
1685 * image, and is almost certainly NOT what you want to use for any other
1688 * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the
1689 * VMA referenced must not be linked in any user-visible tree, i.e. it must be a
1690 * new VMA being mapped.
1692 * The function assumes that addr is either contained within the VMA or below
1693 * it, and makes no attempt to validate this value beyond that.
1695 * Returns true if the read lock was obtained and a stack was perhaps expanded,
1696 * false if the stack expansion failed.
1698 * On stack expansion the function temporarily acquires an mmap write lock
1699 * before downgrading it.
1701 bool mmap_read_lock_maybe_expand(struct mm_struct *mm,
1702 struct vm_area_struct *new_vma,
1703 unsigned long addr, bool write)
1705 if (!write || addr >= new_vma->vm_start) {
1710 if (!(new_vma->vm_flags & VM_GROWSDOWN))
1713 mmap_write_lock(mm);
1714 if (expand_downwards(new_vma, addr)) {
1715 mmap_write_unlock(mm);
1719 mmap_write_downgrade(mm);
1723 __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
1725 struct vm_area_struct *mpnt, *tmp;
1727 unsigned long charge = 0;
1729 VMA_ITERATOR(vmi, mm, 0);
1731 if (mmap_write_lock_killable(oldmm))
1733 flush_cache_dup_mm(oldmm);
1734 uprobe_dup_mmap(oldmm, mm);
1736 * Not linked in yet - no deadlock potential:
1738 mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
1740 /* No ordering required: file already has been exposed. */
1741 dup_mm_exe_file(mm, oldmm);
1743 mm->total_vm = oldmm->total_vm;
1744 mm->data_vm = oldmm->data_vm;
1745 mm->exec_vm = oldmm->exec_vm;
1746 mm->stack_vm = oldmm->stack_vm;
1748 /* Use __mt_dup() to efficiently build an identical maple tree. */
1749 retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL);
1750 if (unlikely(retval))
1753 mt_clear_in_rcu(vmi.mas.tree);
1754 for_each_vma(vmi, mpnt) {
1757 vma_start_write(mpnt);
1758 if (mpnt->vm_flags & VM_DONTCOPY) {
1759 retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start,
1760 mpnt->vm_end, GFP_KERNEL);
1764 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
1769 * Don't duplicate many vmas if we've been oom-killed (for
1772 if (fatal_signal_pending(current)) {
1776 if (mpnt->vm_flags & VM_ACCOUNT) {
1777 unsigned long len = vma_pages(mpnt);
1779 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
1784 tmp = vm_area_dup(mpnt);
1787 retval = vma_dup_policy(mpnt, tmp);
1789 goto fail_nomem_policy;
1791 retval = dup_userfaultfd(tmp, &uf);
1793 goto fail_nomem_anon_vma_fork;
1794 if (tmp->vm_flags & VM_WIPEONFORK) {
1796 * VM_WIPEONFORK gets a clean slate in the child.
1797 * Don't prepare anon_vma until fault since we don't
1798 * copy pages for the current vma.
1800 tmp->anon_vma = NULL;
1801 } else if (anon_vma_fork(tmp, mpnt))
1802 goto fail_nomem_anon_vma_fork;
1803 vm_flags_clear(tmp, VM_LOCKED_MASK);
1805 * Copy/update hugetlb private vma information.
1807 if (is_vm_hugetlb_page(tmp))
1808 hugetlb_dup_vma_private(tmp);
1811 * Link the vma into the MT. After using __mt_dup(), memory
1812 * allocation is not necessary here, so it cannot fail.
1814 vma_iter_bulk_store(&vmi, tmp);
1818 if (tmp->vm_ops && tmp->vm_ops->open)
1819 tmp->vm_ops->open(tmp);
1821 file = tmp->vm_file;
1823 struct address_space *mapping = file->f_mapping;
1826 i_mmap_lock_write(mapping);
1827 if (vma_is_shared_maywrite(tmp))
1828 mapping_allow_writable(mapping);
1829 flush_dcache_mmap_lock(mapping);
1830 /* insert tmp into the share list, just after mpnt */
1831 vma_interval_tree_insert_after(tmp, mpnt,
1833 flush_dcache_mmap_unlock(mapping);
1834 i_mmap_unlock_write(mapping);
1837 if (!(tmp->vm_flags & VM_WIPEONFORK))
1838 retval = copy_page_range(tmp, mpnt);
1841 mpnt = vma_next(&vmi);
1845 /* a new mm has just been created */
1846 retval = arch_dup_mmap(oldmm, mm);
1848 vma_iter_free(&vmi);
1850 mt_set_in_rcu(vmi.mas.tree);
1851 ksm_fork(mm, oldmm);
1852 khugepaged_fork(mm, oldmm);
1856 * The entire maple tree has already been duplicated. If the
1857 * mmap duplication fails, mark the failure point with
1858 * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered,
1859 * stop releasing VMAs that have not been duplicated after this
1863 mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1);
1864 mas_store(&vmi.mas, XA_ZERO_ENTRY);
1865 /* Avoid OOM iterating a broken tree */
1866 set_bit(MMF_OOM_SKIP, &mm->flags);
1869 * The mm_struct is going to exit, but the locks will be dropped
1870 * first. Setting the mm_struct as unstable is advisable as it is
1871 * not fully initialised.
1873 set_bit(MMF_UNSTABLE, &mm->flags);
1876 mmap_write_unlock(mm);
1877 flush_tlb_mm(oldmm);
1878 mmap_write_unlock(oldmm);
1880 dup_userfaultfd_complete(&uf);
1882 dup_userfaultfd_fail(&uf);
1885 fail_nomem_anon_vma_fork:
1886 mpol_put(vma_policy(tmp));
1891 vm_unacct_memory(charge);