// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/mmdebug.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>
#include <linux/memfd.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmap.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
#endif

static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);

/* Update vma->vm_page_prot to reflect vma->vm_flags. */
void vma_set_page_prot(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;
	pgprot_t vm_page_prot;

	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
	if (vma_wants_writenotify(vma, vm_page_prot)) {
		vm_flags &= ~VM_SHARED;
		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
	}
	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
}

/*
 * check_brk_limits() - Use platform specific check of range & verify mlock
 * limits.
 * @addr: The address to check
 * @len: The size of increase.
 *
 * Return: 0 on success.
 */
static int check_brk_limits(unsigned long addr, unsigned long len)
{
	unsigned long mapped_addr;

	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
	if (IS_ERR_VALUE(mapped_addr))
		return mapped_addr;

	return mlock_future_ok(current->mm, current->mm->def_flags, len)
		? 0 : -EAGAIN;
}

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long newbrk, oldbrk, origbrk;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *brkvma, *next = NULL;
	unsigned long min_brk;
	bool populate = false;
	LIST_HEAD(uf);
	struct vma_iterator vmi;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	origbrk = mm->brk;

	min_brk = mm->start_brk;
#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (!current->brk_randomized)
		min_brk = mm->end_data;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
			      mm->end_data, mm->start_data))
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk) {
		mm->brk = brk;
		goto success;
	}

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		/* Search one past newbrk */
		vma_iter_init(&vmi, mm, newbrk);
		brkvma = vma_find(&vmi, oldbrk);
		if (!brkvma || brkvma->vm_start >= oldbrk)
			goto out; /* mapping intersects with an existing non-brk vma. */
		/*
		 * mm->brk must be protected by write mmap_lock.
		 * do_vmi_align_munmap() will drop the lock on success, so
		 * update it before calling do_vmi_align_munmap().
		 */
		mm->brk = brk;
		if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
					/* unlock = */ true))
			goto out;

		goto success_unlocked;
	}

	if (check_brk_limits(oldbrk, newbrk - oldbrk))
		goto out;

	/*
	 * Only check if the next VMA is within the stack_guard_gap of the
	 * expansion area
	 */
	vma_iter_init(&vmi, mm, oldbrk);
	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
		goto out;

	brkvma = vma_prev_limit(&vmi, mm->start_brk);
	/* Ok, looks good - let it rip. */
	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
		goto out;

	mm->brk = brk;
	if (mm->def_flags & VM_LOCKED)
		populate = true;

success:
	mmap_write_unlock(mm);
success_unlocked:
	userfaultfd_unmap_complete(mm, &uf);
	if (populate)
		mm_populate(oldbrk, newbrk - oldbrk);
	return brk;

out:
	mm->brk = origbrk;
	mmap_write_unlock(mm);
	return origbrk;
}

/*
 * If a hint addr is less than mmap_min_addr change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}

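/*
 * Worked example (editorial, not in the original source): with 4 KiB pages
 * and mmap_min_addr set to 64 KiB, a non-NULL hint of 0x1234 is first masked
 * down to 0x1000, found to be below mmap_min_addr, and therefore rounded up
 * to 0x10000. A NULL hint is returned unchanged so the allocator remains
 * free to pick any address.
 */
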
bool mlock_future_ok(struct mm_struct *mm, vm_flags_t vm_flags,
		     unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
		return true;

	locked_pages = bytes >> PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = rlimit(RLIMIT_MEMLOCK);
	limit_pages >>= PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

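/*
 * Illustrative arithmetic (editorial): with 4 KiB pages and RLIMIT_MEMLOCK
 * set to 64 KiB, limit_pages is 16, so a request for 8 KiB (2 pages)
 * succeeds only while mm->locked_vm is at most 14 pages. Holders of
 * CAP_IPC_LOCK bypass the limit entirely.
 */
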
static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISBLK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	if (S_ISSOCK(inode->i_mode))
		return MAX_LFS_FILESIZE;

	/* Special "we do even unsigned file positions" case */
	if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
		return 0;

	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
	return ULONG_MAX;
}

static inline bool file_mmap_ok(struct file *file, struct inode *inode,
				unsigned long pgoff, unsigned long len)
{
	u64 maxsize = file_mmap_size_max(file, inode);

	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}

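/*
 * Editorial note: when file_mmap_size_max() returns 0 for a
 * FOP_UNSIGNED_OFFSET file, the length test above is skipped and
 * "maxsize -= len" wraps around as a u64, so effectively any page offset
 * is accepted - which is the intent of the "unsigned file positions"
 * special case.
 */
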
/**
 * do_mmap() - Perform a userland memory mapping into the current process
 * address space of length @len with protection bits @prot, mmap flags @flags
 * (from which VMA flags will be inferred), and any additional VMA flags to
 * apply @vm_flags. If this is a file-backed mapping then the file is specified
 * in @file and page offset into the file via @pgoff.
 *
 * This function does not perform security checks on the file and assumes, if
 * @uf is non-NULL, the caller has provided a list head to track unmap events
 * for userfaultfd @uf.
 *
 * It also simply indicates whether memory population is required by setting
 * @populate, which must be non-NULL, expecting the caller to actually perform
 * this task itself if appropriate.
 *
 * This function will invoke architecture-specific (and if provided and
 * relevant, file system-specific) logic to determine the most appropriate
 * unmapped area in which to place the mapping if not MAP_FIXED.
 *
 * Callers which require userland mmap() behaviour should invoke vm_mmap(),
 * which is also exported for module use.
 *
 * Callers which require this behaviour minus the security checks, userfaultfd
 * handling and populate behaviour, and which handle the mmap write lock
 * themselves, should call this function.
 *
 * Note that the returned address may reside within a merged VMA if an
 * appropriate merge were to take place, so it doesn't necessarily specify the
 * start of a VMA, rather only the start of a valid mapped range of length
 * @len bytes, rounded down to the nearest page size.
 *
 * The caller must write-lock current->mm->mmap_lock.
 *
 * @file: An optional struct file pointer describing the file which is to be
 * mapped, if a file-backed mapping.
 * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the
 * address at which to perform this mapping. See mmap(2) for details. Must be
 * page-aligned.
 * @len: The length of the mapping. Will be page-aligned and must be at least 1
 * page in size.
 * @prot: Protection bits describing access required to the mapping. See
 * mmap(2) for details.
 * @flags: Flags specifying how the mapping should be performed, see mmap(2)
 * for details.
 * @vm_flags: VMA flags which should be set by default, or 0 otherwise.
 * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise.
 * @populate: A pointer to a value which will be set to 0 if no population of
 * the range is required, or the number of bytes to populate if it is. Must be
 * non-NULL. See mmap(2) for details as to under what circumstances population
 * of the range occurs.
 * @uf: An optional pointer to a list head to track userfaultfd unmap events
 * should unmapping events arise. If provided, it is up to the caller to manage
 * this.
 *
 * Returns: Either an error, or the address at which the requested mapping has
 * been performed.
 */
unsigned long do_mmap(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, vm_flags_t vm_flags,
			unsigned long pgoff, unsigned long *populate,
			struct list_head *uf)
{
	struct mm_struct *mm = current->mm;
	int pkey = 0;

	*populate = 0;

	mmap_assert_write_locked(mm);

	if (!len)
		return -EINVAL;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && path_noexec(&file->f_path)))
			prot |= PROT_EXEC;

	/* force arch specific MAP_FIXED handling in get_unmapped_area */
	if (flags & MAP_FIXED_NOREPLACE)
		flags |= MAP_FIXED;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/*
	 * addr is returned from get_unmapped_area.
	 * There are two cases:
	 * 1) MAP_FIXED == false
	 *	unallocated memory, no need to check sealing.
	 * 2) MAP_FIXED == true
	 *	sealing is checked inside mmap_region() when
	 *	do_vmi_munmap() is called.
	 */

	if (prot == PROT_EXEC) {
		pkey = execute_only_pkey(mm);
		if (pkey < 0)
			pkey = 0;
	}

	/* Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED_NOREPLACE) {
		if (find_vma_intersection(mm, addr, addr + len))
			return -EEXIST;
	}

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	if (!mlock_future_ok(mm, vm_flags, len))
		return -EAGAIN;

	if (file) {
		struct inode *inode = file_inode(file);
		unsigned long flags_mask;
		int err;

		if (!file_mmap_ok(file, inode, pgoff, len))
			return -EOVERFLOW;

		flags_mask = LEGACY_MAP_MASK;
		if (file->f_op->fop_flags & FOP_MMAP_SYNC)
			flags_mask |= MAP_SYNC;

		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Force use of MAP_SHARED_VALIDATE with non-legacy
			 * flags. E.g. MAP_SYNC is dangerous to use with
			 * MAP_SHARED as you don't know which consistency model
			 * you will get. We silently ignore unsupported flags
			 * with MAP_SHARED to preserve backward compatibility.
			 */
			flags &= LEGACY_MAP_MASK;
			fallthrough;
		case MAP_SHARED_VALIDATE:
			if (flags & ~flags_mask)
				return -EOPNOTSUPP;
			if (prot & PROT_WRITE) {
				if (!(file->f_mode & FMODE_WRITE))
					return -EACCES;
				if (IS_SWAPFILE(file->f_mapping->host))
					return -ETXTBSY;
			}

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (path_noexec(&file->f_path)) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!can_mmap_file(file))
				return -ENODEV;
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			break;

		default:
			return -EINVAL;
		}

		/*
		 * Check to see if we are violating any seals and update VMA
		 * flags if necessary to avoid future seal violations.
		 */
		err = memfd_check_seals_mmap(file, &vm_flags);
		if (err)
			return (unsigned long)err;
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
				return -EINVAL;
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_DROPPABLE:
			if (VM_DROPPABLE == VM_NONE)
				return -ENOTSUPP;
			/*
			 * It makes no sense for a locked or stack area to be
			 * droppable.
			 *
			 * Also, since droppable pages can just go away at any
			 * time, it makes no sense to copy them on fork or dump
			 * them.
			 *
			 * And don't attempt to combine with hugetlb for now.
			 */
			if (flags & (MAP_LOCKED | MAP_HUGETLB))
				return -EINVAL;
			if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
				return -EINVAL;

			vm_flags |= VM_DROPPABLE;

			/*
			 * If the pages can be dropped, then it doesn't make
			 * sense to reserve them.
			 */
			vm_flags |= VM_NORESERVE;

			/*
			 * Likewise, they're volatile enough that they
			 * shouldn't survive forks or coredumps.
			 */
			vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
			fallthrough;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Set 'VM_NORESERVE' if we should not account for the
	 * memory use of this mapping.
	 */
	if (flags & MAP_NORESERVE) {
		/* We honor MAP_NORESERVE if allowed to overcommit */
		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			vm_flags |= VM_NORESERVE;

		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
		if (file && is_file_hugepages(file))
			vm_flags |= VM_NORESERVE;
	}

	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
	if (!IS_ERR_VALUE(addr) &&
	    ((vm_flags & VM_LOCKED) ||
	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
		*populate = len;
	return addr;
}

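/*
 * Usage sketch (editorial, modelled on vm_mmap_pgoff() in mm/util.c): a
 * caller is expected to take the write lock, call do_mmap(), and then
 * perform the population step itself:
 *
 *	unsigned long populate;
 *	LIST_HEAD(uf);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	userfaultfd_unmap_complete(mm, &uf);
 *	if (populate)
 *		mm_populate(ret, populate);
 */
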
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		file = fget(fd);
		if (!file)
			return -EBADF;
		if (is_file_hugepages(file)) {
			len = ALIGN(len, huge_page_size(hstate_file(file)));
		} else if (unlikely(flags & MAP_HUGETLB)) {
			retval = -EINVAL;
			goto out_fput;
		}
	} else if (flags & MAP_HUGETLB) {
		struct hstate *hs;

		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (!hs)
			return -EINVAL;

		len = ALIGN(len, huge_page_size(hs));
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
				VM_NORESERVE,
				HUGETLB_ANONHUGE_INODE,
				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
out_fput:
	if (file)
		fput(file);
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

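/*
 * Editorial note: mmap_pgoff() above takes the file offset in pages, so any
 * byte offset is representable; old_mmap() takes a byte offset and must
 * therefore reject values that are not page-aligned before converting with
 * "a.offset >> PAGE_SHIFT".
 */
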
/*
 * Determine if the allocation needs to ensure that there is no
 * existing mapping within its guard gaps, for use as start_gap.
 */
static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
{
	if (vm_flags & VM_SHADOW_STACK)
		return PAGE_SIZE;

	return 0;
}

/*
 * Search for an unmapped address range.
 *
 * We are looking for a range that:
 * - does not intersect with any VMA;
 * - is contained within the [low_limit, high_limit) interval;
 * - is at least the desired size;
 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
 */
unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
{
	unsigned long addr;

	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
		addr = unmapped_area_topdown(info);
	else
		addr = unmapped_area(info);

	trace_vm_unmapped_area(addr, info);
	return addr;
}

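/*
 * Alignment example (editorial): to request a 2 MiB-aligned range a caller
 * would set info->align_mask = PMD_SIZE - 1 and info->align_offset = 0,
 * making the constraint above reduce to "begin_addr is a multiple of
 * PMD_SIZE".
 */
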
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * i.e.
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
unsigned long
generic_get_unmapped_area(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = mmap_end;
	info.start_gap = stack_guard_placement(vm_flags);
	if (filp && is_file_hugepages(filp))
		info.align_mask = huge_page_mask_align(filp);
	return vm_unmapped_area(&info);
}

#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		       unsigned long len, unsigned long pgoff,
		       unsigned long flags, vm_flags_t vm_flags)
{
	return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
					 vm_flags);
}
#endif

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
unsigned long
generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};
	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);

	/* requested length too big for entire address space */
	if (len > mmap_end - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma_prev(mm, addr, &prev);
		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
	info.start_gap = stack_guard_placement(vm_flags);
	if (filp && is_file_hugepages(filp))
		info.align_mask = huge_page_mask_align(filp);
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = mmap_end;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags, vm_flags_t vm_flags)
{
	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
						 vm_flags);
}
#endif

unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
					   unsigned long addr, unsigned long len,
					   unsigned long pgoff, unsigned long flags,
					   vm_flags_t vm_flags)
{
	if (test_bit(MMF_TOPDOWN, &mm->flags))
		return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
						      flags, vm_flags);
	return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
}

unsigned long
__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long)
				  = NULL;

	unsigned long error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (file) {
		if (file->f_op->get_unmapped_area)
			get_area = file->f_op->get_unmapped_area;
	} else if (flags & MAP_SHARED) {
		/*
		 * mmap_region() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge.
		 */
		get_area = shmem_get_unmapped_area;
	}

	/* Always treat pgoff as zero for anonymous memory. */
	if (!file)
		pgoff = 0;

	if (get_area) {
		addr = get_area(file, addr, len, pgoff, flags);
	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
		   && !addr /* no hint */
		   && IS_ALIGNED(len, PMD_SIZE)) {
		/* Ensures that larger anonymous mappings are THP aligned. */
		addr = thp_get_unmapped_area_vmflags(file, addr, len,
						     pgoff, flags, vm_flags);
	} else {
		addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
						    pgoff, flags, vm_flags);
	}
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (offset_in_page(addr))
		return -EINVAL;

	error = security_mmap_addr(addr);
	return error ? error : addr;
}

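/*
 * Editorial note on the THP branch above: an anonymous mapping with no
 * address hint whose length is a multiple of PMD_SIZE is routed through
 * thp_get_unmapped_area_vmflags(), so that, for example, a request for
 * several PMD-sized units of MAP_ANONYMOUS memory can come back PMD-aligned
 * and be eligible for huge pages.
 */
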
unsigned long
mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
		     unsigned long addr, unsigned long len,
		     unsigned long pgoff, unsigned long flags)
{
	return mm_get_unmapped_area_vmflags(mm, file, addr, len,
					    pgoff, flags, 0);
}
EXPORT_SYMBOL(mm_get_unmapped_area);

/**
 * find_vma_intersection() - Look up the first VMA which intersects the interval
 * @mm: The process address space.
 * @start_addr: The inclusive start user address.
 * @end_addr: The exclusive end user address.
 *
 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
 * start_addr < end_addr.
 */
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/**
 * find_vma() - Find the VMA for a given address, or the next VMA.
 * @mm: The mm_struct to check
 * @addr: The address
 *
 * Returns: The VMA associated with addr, or the next VMA.
 * May return %NULL in the case of no VMA at addr or above.
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	unsigned long index = addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
EXPORT_SYMBOL(find_vma);

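/*
 * Canonical usage (editorial): because find_vma() may return the next VMA
 * rather than one containing @addr, callers that need containment must
 * re-check vma->vm_start <= addr on the result, or use vma_lookup(), which
 * only returns a VMA that actually contains @addr.
 */
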
/**
 * find_vma_prev() - Find the VMA for a given address, or the next vma and
 * set %pprev to the previous VMA, if any.
 * @mm: The mm_struct to check
 * @addr: The address
 * @pprev: The pointer to set to the previous VMA
 *
 * Note that the RCU lock is not taken here; the caller's external mmap_lock
 * is relied upon instead.
 *
 * Returns: The VMA associated with @addr, or the next vma.
 * May return %NULL in the case of no vma at addr or above.
 */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
	      struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	*pprev = vma_prev(&vmi);
	if (!vma)
		vma = vma_next(&vmi);
	return vma;
}

/* enforced gap between the expanding stack and other mappings. */
unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;

static int __init cmdline_parse_stack_guard_gap(char *p)
{
	unsigned long val;
	char *endptr;

	val = simple_strtoul(p, &endptr, 10);
	if (!*endptr)
		stack_guard_gap = val << PAGE_SHIFT;

	return 1;
}
__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);

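/*
 * Example (editorial): the default gap is 256 pages (1 MiB with 4 KiB
 * pages); booting with "stack_guard_gap=512" doubles that to 512 pages.
 * Note that the parsed value is taken as a page count, not bytes.
 */
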
#ifdef CONFIG_STACK_GROWSUP
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev)
		return NULL;
	if (expand_stack_locked(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
	return prev;
}
#else
int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	start = vma->vm_start;
	if (expand_stack_locked(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		populate_vma_page_range(vma, addr, start, NULL);
	return vma;
}
#endif

#if defined(CONFIG_STACK_GROWSUP)

#define vma_expand_up(vma, addr)	expand_upwards(vma, addr)
#define vma_expand_down(vma, addr)	(-EFAULT)

#else

#define vma_expand_up(vma, addr)	(-EFAULT)
#define vma_expand_down(vma, addr)	expand_downwards(vma, addr)

#endif

/*
 * expand_stack(): legacy interface for page faulting. Don't use unless
 * you have to.
 *
 * This is called with the mm locked for reading, drops the lock, takes
 * the lock for writing, tries to look up a vma again, expands it if
 * necessary, and downgrades the lock to reading again.
 *
 * If no vma is found or it can't be expanded, it returns NULL and has
 * dropped the lock.
 */
struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	mmap_read_unlock(mm);
	if (mmap_write_lock_killable(mm))
		return NULL;

	vma = find_vma_prev(mm, addr, &prev);
	if (vma && vma->vm_start <= addr)
		goto success;

	if (prev && !vma_expand_up(prev, addr)) {
		vma = prev;
		goto success;
	}

	if (vma && !vma_expand_down(vma, addr))
		goto success;

	mmap_write_unlock(mm);
	return NULL;

success:
	mmap_write_downgrade(mm);
	return vma;
}

/* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
 * @mm: The mm_struct
 * @start: The start address to munmap
 * @len: The length to be munmapped.
 * @uf: The userfaultfd list_head
 *
 * Return: 0 on success, error otherwise.
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	      struct list_head *uf)
{
	VMA_ITERATOR(vmi, mm, start);

	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
}

int vm_munmap(unsigned long start, size_t len)
{
	return __vm_munmap(start, len, false);
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	addr = untagged_addr(addr);
	return __vm_munmap(addr, len, true);
}

/*
 * Emulation of deprecated remap_file_pages() syscall.
 */
SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long populate = 0;
	unsigned long ret = -EINVAL;
	struct file *file;
	vm_flags_t vm_flags;

	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
		     current->comm, current->pid);

	if (prot)
		return ret;
	start = start & PAGE_MASK;
	size = size & PAGE_MASK;

	if (start + size <= start)
		return ret;

	/* Does pgoff wrap? */
	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
		return ret;

	if (mmap_read_lock_killable(mm))
		return -EINTR;

	/*
	 * Look up the VMA under the read lock first so we can perform the
	 * security check without holding locks (which can be problematic).
	 * We reacquire a write lock later and check that nothing changed
	 * underneath us.
	 */
	vma = vma_lookup(mm, start);

	if (!vma || !(vma->vm_flags & VM_SHARED)) {
		mmap_read_unlock(mm);
		return -EINVAL;
	}

	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;

	flags &= MAP_NONBLOCK;
	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
	if (vma->vm_flags & VM_LOCKED)
		flags |= MAP_LOCKED;

	/* Save vm_flags used to calculate prot and flags, and recheck later. */
	vm_flags = vma->vm_flags;
	file = get_file(vma->vm_file);

	mmap_read_unlock(mm);

	/* Call outside mmap_lock to be consistent with other callers. */
	ret = security_mmap_file(file, prot, flags);
	if (ret) {
		fput(file);
		return ret;
	}

	ret = -EINVAL;

	/* OK security check passed, take write lock + let it rip. */
	if (mmap_write_lock_killable(mm)) {
		fput(file);
		return -EINTR;
	}

	vma = vma_lookup(mm, start);

	if (!vma)
		goto out;

	/* Make sure things didn't change under us. */
	if (vma->vm_flags != vm_flags)
		goto out;
	if (vma->vm_file != file)
		goto out;

	if (start + size > vma->vm_end) {
		VMA_ITERATOR(vmi, mm, vma->vm_end);
		struct vm_area_struct *next, *prev = vma;

		for_each_vma_range(vmi, next, start + size) {
			/* hole between vmas ? */
			if (next->vm_start != prev->vm_end)
				goto out;

			if (next->vm_file != vma->vm_file)
				goto out;

			if (next->vm_flags != vma->vm_flags)
				goto out;

			if (start + size <= next->vm_end)
				break;

			prev = next;
		}

		if (!next)
			goto out;
	}

	ret = do_mmap(vma->vm_file, start, size,
		      prot, flags, 0, pgoff, &populate, NULL);
out:
	mmap_write_unlock(mm);
	fput(file);
	if (populate)
		mm_populate(ret, populate);
	if (!IS_ERR_VALUE(ret))
		ret = 0;
	return ret;
}

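/*
 * Editorial note: the emulation above no longer rearranges individual pages
 * as the original syscall did; it simply re-mmaps the same file with
 * MAP_FIXED at @start using the caller's @pgoff, after verifying that the
 * affected range is one contiguous run of VM_SHARED VMAs backed by the same
 * file with identical flags.
 */
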
int vm_brk_flags(unsigned long addr, unsigned long request, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long len;
	int ret;
	bool populate;
	LIST_HEAD(uf);
	VMA_ITERATOR(vmi, mm, addr);

	len = PAGE_ALIGN(request);
	if (len < request)
		return -ENOMEM;
	if (!len)
		return 0;

	/* Until we need other flags, refuse anything except VM_EXEC. */
	if ((vm_flags & (~VM_EXEC)) != 0)
		return -EINVAL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = check_brk_limits(addr, len);
	if (ret)
		goto limits_failed;

	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
	if (ret)
		goto munmap_failed;

	vma = vma_prev(&vmi);
	ret = do_brk_flags(&vmi, vma, addr, len, vm_flags);
	populate = ((mm->def_flags & VM_LOCKED) != 0);
	mmap_write_unlock(mm);
	userfaultfd_unmap_complete(mm, &uf);
	if (populate && !ret)
		mm_populate(addr, len);
	return ret;

munmap_failed:
limits_failed:
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL(vm_brk_flags);

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	unsigned long nr_accounted = 0;
	VMA_ITERATOR(vmi, mm, 0);
	int count = 0;

	/* mm's last user has gone, and it's about to be pulled down */
	mmu_notifier_release(mm);

	mmap_read_lock(mm);
	arch_exit_mmap(mm);

	vma = vma_next(&vmi);
	if (!vma || unlikely(xa_is_zero(vma))) {
		/* Can happen if dup_mmap() received an OOM */
		mmap_read_unlock(mm);
		mmap_write_lock(mm);
		goto destroy;
	}

	flush_cache_mm(mm);
	tlb_gather_mmu_fullmm(&tlb, mm);
	/* update_hiwater_rss(mm) here? but nobody should be looking */
	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
	unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
	mmap_read_unlock(mm);

	/*
	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
	 * because the memory has been already freed.
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);
	mmap_write_lock(mm);
	mt_clear_in_rcu(&mm->mm_mt);
	vma_iter_set(&vmi, vma->vm_end);
	free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
		      USER_PGTABLES_CEILING, true);
	tlb_finish_mmu(&tlb);

	/*
	 * Walk the list again, actually closing and freeing it, with preemption
	 * enabled, without holding any MM locks besides the unreachable
	 * mmap_write_lock.
	 */
	vma_iter_set(&vmi, vma->vm_end);
	do {
		if (vma->vm_flags & VM_ACCOUNT)
			nr_accounted += vma_pages(vma);
		vma_mark_detached(vma);
		remove_vma(vma);
		count++;
		cond_resched();
		vma = vma_next(&vmi);
	} while (vma && likely(!xa_is_zero(vma)));

	BUG_ON(count != mm->map_count);

	trace_exit_mmap(mm);
destroy:
	__mt_destroy(&mm->mm_mt);
	mmap_write_unlock(mm);
	vm_unacct_memory(nr_accounted);
}

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
{
	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
		return false;

	if (is_data_mapping(flags) &&
	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
		/* Workaround for Valgrind */
		if (rlimit(RLIMIT_DATA) == 0 &&
		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
			return true;

		pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
			     current->comm, current->pid,
			     (mm->data_vm + npages) << PAGE_SHIFT,
			     rlimit(RLIMIT_DATA),
			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");

		if (!ignore_rlimit_data)
			return false;
	}

	return true;
}

void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
{
	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);

	if (is_exec_mapping(flags))
		mm->exec_vm += npages;
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;
	else if (is_data_mapping(flags))
		mm->data_vm += npages;
}

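/*
 * Illustrative numbers (editorial): with 4 KiB pages and RLIMIT_DATA of
 * 1 MiB, a process may hold at most 256 pages of data mappings (private
 * writable, non-stack); beyond that, may_expand_vm() fails unless the soft
 * rlimit is 0 with a permissive hard limit (the Valgrind workaround above)
 * or ignore_rlimit_data is set.
 */
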
static vm_fault_t special_mapping_fault(struct vm_fault *vmf);

/*
 * Close hook, called for unmap() and on the old vma for mremap().
 *
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
	const struct vm_special_mapping *sm = vma->vm_private_data;

	if (sm->close)
		sm->close(sm, vma);
}

static const char *special_mapping_name(struct vm_area_struct *vma)
{
	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
}

static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
	struct vm_special_mapping *sm = new_vma->vm_private_data;

	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
		return -EFAULT;

	if (sm->mremap)
		return sm->mremap(sm, new_vma);

	return 0;
}

static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * Forbid splitting special mappings - the kernel has expectations
	 * about the number of pages in the mapping. Together with
	 * VM_DONTEXPAND, the size of the vma should stay the same over the
	 * special mapping's lifetime.
	 */
	return -EINVAL;
}

static const struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
	.mremap = special_mapping_mremap,
	.name = special_mapping_name,
	/* vDSO code relies that VVAR can't be accessed remotely */
	.access = NULL,
	.may_split = special_mapping_split,
};

fa5dc22f | 1416 | { |
11bac800 | 1417 | struct vm_area_struct *vma = vmf->vma; |
b1d0e4f5 | 1418 | pgoff_t pgoff; |
fa5dc22f | 1419 | struct page **pages; |
497258df | 1420 | struct vm_special_mapping *sm = vma->vm_private_data; |
fa5dc22f | 1421 | |
497258df LT |
1422 | if (sm->fault) |
1423 | return sm->fault(sm, vmf->vma, vmf); | |
f872f540 | 1424 | |
497258df | 1425 | pages = sm->pages; |
a62c34bd | 1426 | |
8a9cc3b5 | 1427 | for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) |
b1d0e4f5 | 1428 | pgoff--; |
fa5dc22f RM |
1429 | |
1430 | if (*pages) { | |
1431 | struct page *page = *pages; | |
1432 | get_page(page); | |
b1d0e4f5 NP |
1433 | vmf->page = page; |
1434 | return 0; | |
fa5dc22f RM |
1435 | } |
1436 | ||
b1d0e4f5 | 1437 | return VM_FAULT_SIGBUS; |
fa5dc22f RM |
1438 | } |
1439 | ||
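/*
 * Behaviour sketch (editorial): for a mapping installed with
 * pages = { p0, p1, NULL }, a fault at pgoff 0 or 1 returns the
 * corresponding page with an extra reference taken; a fault at pgoff 2
 * stops at the NULL terminator and delivers SIGBUS, as documented above
 * _install_special_mapping() below.
 */
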
static struct vm_area_struct *__install_special_mapping(
	struct mm_struct *mm,
	unsigned long addr, unsigned long len,
	vm_flags_t vm_flags, void *priv,
	const struct vm_operations_struct *ops)
{
	int ret;
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);
	if (unlikely(vma == NULL))
		return ERR_PTR(-ENOMEM);

	vma_set_range(vma, addr, addr + len, 0);
	vm_flags_init(vma, (vm_flags | mm->def_flags |
		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = ops;
	vma->vm_private_data = priv;

	ret = insert_vm_struct(mm, vma);
	if (ret)
		goto out;

	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

	perf_event_mmap(vma);

	return vma;

out:
	vm_area_free(vma);
	return ERR_PTR(ret);
}

2eefd878 DS |
1476 | bool vma_is_special_mapping(const struct vm_area_struct *vma, |
1477 | const struct vm_special_mapping *sm) | |
1478 | { | |
1479 | return vma->vm_private_data == sm && | |
497258df | 1480 | vma->vm_ops == &special_mapping_vmops; |
2eefd878 DS |
1481 | } |
1482 | ||
a62c34bd | 1483 | /* |
c1e8d7c6 | 1484 | * Called with mm->mmap_lock held for writing. |
a62c34bd AL |
1485 | * Insert a new vma covering the given region, with the given flags. |
1486 | * Its pages are supplied by the given array of struct page *. | |
1487 | * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated. | |
1488 | * The region past the last page supplied will always produce SIGBUS. | |
1489 | * The array pointer and the pages it points to are assumed to stay alive | |
1490 | * for as long as this mapping might exist. | |
1491 | */ | |
1492 | struct vm_area_struct *_install_special_mapping( | |
1493 | struct mm_struct *mm, | |
1494 | unsigned long addr, unsigned long len, | |
bfbe7110 | 1495 | vm_flags_t vm_flags, const struct vm_special_mapping *spec) |
a62c34bd | 1496 | { |
27f28b97 CG |
1497 | return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec, |
1498 | &special_mapping_vmops); | |
a62c34bd AL |
1499 | } |
1500 | ||
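A hedged sketch of a typical caller, modeled on the way architectures map their vDSO; everything named my_* is hypothetical, and only _install_special_mapping() and struct vm_special_mapping come from this file. Per the comment above, the caller must hold mm->mmap_lock for writing:

/* Hypothetical one-page special mapping; the pages array is
 * NULL-terminated, so faults past the last page raise SIGBUS. */
static struct page *my_pages[2];	/* [0] set at init time, [1] = NULL */

static const struct vm_special_mapping my_mapping = {
	.name = "[my_special]",
	.pages = my_pages,
};

/* Called with mm->mmap_lock held for writing. */
static int install_my_mapping(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD, &my_mapping);

	return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}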
aacdde72 KY |
1501 | #ifdef CONFIG_SYSCTL |
1502 | #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ | |
1503 | defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) | |
1504 | int sysctl_legacy_va_layout; | |
1505 | #endif | |
1506 | ||
1507 | static const struct ctl_table mmap_table[] = { | |
1508 | { | |
1509 | .procname = "max_map_count", | |
1510 | .data = &sysctl_max_map_count, | |
1511 | .maxlen = sizeof(sysctl_max_map_count), | |
1512 | .mode = 0644, | |
1513 | .proc_handler = proc_dointvec_minmax, | |
1514 | .extra1 = SYSCTL_ZERO, | |
1515 | }, | |
1516 | #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ | |
1517 | defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) | |
1518 | { | |
1519 | .procname = "legacy_va_layout", | |
1520 | .data = &sysctl_legacy_va_layout, | |
1521 | .maxlen = sizeof(sysctl_legacy_va_layout), | |
1522 | .mode = 0644, | |
1523 | .proc_handler = proc_dointvec_minmax, | |
1524 | .extra1 = SYSCTL_ZERO, | |
1525 | }, | |
1526 | #endif | |
1527 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS | |
1528 | { | |
1529 | .procname = "mmap_rnd_bits", | |
1530 | .data = &mmap_rnd_bits, | |
1531 | .maxlen = sizeof(mmap_rnd_bits), | |
1532 | .mode = 0600, | |
1533 | .proc_handler = proc_dointvec_minmax, | |
1534 | .extra1 = (void *)&mmap_rnd_bits_min, | |
1535 | .extra2 = (void *)&mmap_rnd_bits_max, | |
1536 | }, | |
1537 | #endif | |
1538 | #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS | |
1539 | { | |
1540 | .procname = "mmap_rnd_compat_bits", | |
1541 | .data = &mmap_rnd_compat_bits, | |
1542 | .maxlen = sizeof(mmap_rnd_compat_bits), | |
1543 | .mode = 0600, | |
1544 | .proc_handler = proc_dointvec_minmax, | |
1545 | .extra1 = (void *)&mmap_rnd_compat_bits_min, | |
1546 | .extra2 = (void *)&mmap_rnd_compat_bits_max, | |
1547 | }, | |
1548 | #endif | |
1549 | }; | |
1550 | #endif /* CONFIG_SYSCTL */ | |
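The table above is exposed under /proc/sys/vm/. A hedged userspace sketch, not part of mmap.c, reading one of the knobs (the two mmap_rnd_* entries are mode 0600, so they additionally require privilege):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/proc/sys/vm/max_map_count", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("vm.max_map_count = %s", buf);
	fclose(f);
	return 0;
}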
1551 | ||
8feae131 | 1552 | /* |
3e43e260 | 1553 | * Initialise the percpu counter for VM and initialise VMA state. |
8feae131 DH |
1554 | */ |
1555 | void __init mmap_init(void) | |
1556 | { | |
00a62ce9 KM |
1557 | int ret; |
1558 | ||
908c7f19 | 1559 | ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL); |
00a62ce9 | 1560 | VM_BUG_ON(ret); |
aacdde72 KY |
1561 | #ifdef CONFIG_SYSCTL |
1562 | register_sysctl_init("vm", mmap_table); | |
1563 | #endif | |
3e43e260 | 1564 | vma_state_init(); |
8feae131 | 1565 | } |
c9b1d098 AS |
1566 | |
1567 | /* | |
1568 | * Initialise sysctl_user_reserve_kbytes. | |
1569 | * | |
1570 | * This is intended to prevent a user from starting a single memory hogging | |
1571 | * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER | |
1572 | * mode. | |
1573 | * | |
1574 | * The default value is min(3% of free memory, 128MB) | |
1575 | * 128MB is enough to recover with sshd/login, bash, and top/kill. | |
1576 | */ | |
1640879a | 1577 | static int init_user_reserve(void) |
c9b1d098 AS |
1578 | { |
1579 | unsigned long free_kbytes; | |
1580 | ||
b1773e0e | 1581 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
c9b1d098 | 1582 | |
9c793854 | 1583 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K); |
c9b1d098 AS |
1584 | return 0; |
1585 | } | |
a64fb3cd | 1586 | subsys_initcall(init_user_reserve); |
4eeab4f5 AS |
1587 | |
1588 | /* | |
1589 | * Initialise sysctl_admin_reserve_kbytes. | |
1590 | * | |
1591 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin | |
1592 | * to log in and kill a memory hogging process. | |
1593 | * | |
1594 | * Systems with more than 256MB will reserve 8MB, enough to recover | |
1595 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will | |
1596 | * only reserve 3% of free pages by default. | |
1597 | */ | |
1640879a | 1598 | static int init_admin_reserve(void) |
4eeab4f5 AS |
1599 | { |
1600 | unsigned long free_kbytes; | |
1601 | ||
b1773e0e | 1602 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
4eeab4f5 | 1603 | |
9c793854 | 1604 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K); |
4eeab4f5 AS |
1605 | return 0; |
1606 | } | |
a64fb3cd | 1607 | subsys_initcall(init_admin_reserve); |
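Worked example with illustrative numbers: both initcalls compute free_kbytes / 32 (roughly 3%). With 8 GB free, free_kbytes is 8388608, so free_kbytes / 32 is 262144 kB; both minimums are taken at the cap, leaving vm.user_reserve_kbytes at 131072 (128 MB) and vm.admin_reserve_kbytes at 8192 (8 MB). With 2 GB free, free_kbytes / 32 is 65536 kB, so the user reserve drops to 64 MB while the admin reserve still hits its 8 MB cap.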
1640879a AS |
1608 | |
1609 | /* | |
1610 | * Reinitialise user and admin reserves if memory is added or removed. | |
1611 | * | |
1612 | * The default user reserve max is 128MB, and the default max for the | |
1613 | * admin reserve is 8MB. These are usually, but not always, enough to | |
1614 | * enable recovery from a memory hogging process using login/sshd, a shell, | |
1615 | * and tools like top. It may make sense to increase or even disable the | |
1616 | * reserve depending on the existence of swap or variations in the recovery | |
1617 | * tools. So, the admin may have changed them. | |
1618 | * | |
1619 | * If memory is added and the reserves have been eliminated or increased above | |
1620 | * the default max, then we'll trust the admin. | |
1621 | * | |
1622 | * If memory is removed and there isn't enough free memory, then we | |
1623 | * need to reset the reserves. | |
1624 | * | |
1625 | * Otherwise keep the reserve set by the admin. | |
1626 | */ | |
1627 | static int reserve_mem_notifier(struct notifier_block *nb, | |
1628 | unsigned long action, void *data) | |
1629 | { | |
1630 | unsigned long tmp, free_kbytes; | |
1631 | ||
1632 | switch (action) { | |
1633 | case MEM_ONLINE: | |
1634 | /* Default max is 128MB. Leave alone if modified by operator. */ | |
1635 | tmp = sysctl_user_reserve_kbytes; | |
9c793854 | 1636 | if (tmp > 0 && tmp < SZ_128K) |
1640879a AS |
1637 | init_user_reserve(); |
1638 | ||
1639 | /* Default max is 8MB. Leave alone if modified by operator. */ | |
1640 | tmp = sysctl_admin_reserve_kbytes; | |
9c793854 | 1641 | if (tmp > 0 && tmp < SZ_8K) |
1640879a AS |
1642 | init_admin_reserve(); |
1643 | ||
1644 | break; | |
1645 | case MEM_OFFLINE: | |
b1773e0e | 1646 | free_kbytes = K(global_zone_page_state(NR_FREE_PAGES)); |
1640879a AS |
1647 | |
1648 | if (sysctl_user_reserve_kbytes > free_kbytes) { | |
1649 | init_user_reserve(); | |
1650 | pr_info("vm.user_reserve_kbytes reset to %lu\n", | |
1651 | sysctl_user_reserve_kbytes); | |
1652 | } | |
1653 | ||
1654 | if (sysctl_admin_reserve_kbytes > free_kbytes) { | |
1655 | init_admin_reserve(); | |
1656 | pr_info("vm.admin_reserve_kbytes reset to %lu\n", | |
1657 | sysctl_admin_reserve_kbytes); | |
1658 | } | |
1659 | break; | |
1660 | default: | |
1661 | break; | |
1662 | } | |
1663 | return NOTIFY_OK; | |
1664 | } | |
1665 | ||
1640879a AS |
1666 | static int __meminit init_reserve_notifier(void) |
1667 | { | |
1eeaa4fd | 1668 | if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI)) |
b1de0d13 | 1669 | pr_err("Failed registering memory add/remove notifier for admin reserve\n"); |
1640879a AS |
1670 | |
1671 | return 0; | |
1672 | } | |
a64fb3cd | 1673 | subsys_initcall(init_reserve_notifier); |
d61f0d59 | 1674 | |
7a571499 LS |
1675 | /* |
1676 | * Obtain a read lock on mm->mmap_lock. If the specified address is below | |
1677 | * the start of the VMA, the intent is to perform a write, and the VMA is | |
1678 | * a downward-growing stack, then attempt to expand the stack to contain it. | |
1679 | * | |
1680 | * This function is intended only for obtaining an argument page from an ELF | |
1681 | * image, and is almost certainly NOT what you want to use for any other | |
1682 | * purpose. | |
1683 | * | |
1684 | * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the | |
1685 | * VMA referenced must not be linked in any user-visible tree, i.e. it must be a | |
1686 | * new VMA being mapped. | |
1687 | * | |
1688 | * The function assumes that addr is either contained within the VMA or below | |
1689 | * it, and makes no attempt to validate this value beyond that. | |
1690 | * | |
1691 | * Returns true if the read lock was obtained and a stack was perhaps expanded, | |
1692 | * false if the stack expansion failed. | |
1693 | * | |
1694 | * On stack expansion the function temporarily acquires an mmap write lock | |
1695 | * before downgrading it. | |
1696 | */ | |
1697 | bool mmap_read_lock_maybe_expand(struct mm_struct *mm, | |
1698 | struct vm_area_struct *new_vma, | |
1699 | unsigned long addr, bool write) | |
1700 | { | |
1701 | if (!write || addr >= new_vma->vm_start) { | |
1702 | mmap_read_lock(mm); | |
1703 | return true; | |
1704 | } | |
1705 | ||
1706 | if (!(new_vma->vm_flags & VM_GROWSDOWN)) | |
1707 | return false; | |
1708 | ||
1709 | mmap_write_lock(mm); | |
1710 | if (expand_downwards(new_vma, addr)) { | |
1711 | mmap_write_unlock(mm); | |
1712 | return false; | |
1713 | } | |
1714 | ||
1715 | mmap_write_downgrade(mm); | |
1716 | return true; | |
1717 | } | |
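A hedged sketch of the one intended caller pattern, an ELF loader writing an argument page into a brand-new stack VMA that is not yet visible to anyone else; write_arg_page() is hypothetical:

/* new stack_vma is freshly created and not linked into any
 * user-visible tree, as the comment above requires. */
static int copy_arg_to_stack(struct mm_struct *mm,
			     struct vm_area_struct *stack_vma,
			     unsigned long addr)
{
	int ret;

	if (!mmap_read_lock_maybe_expand(mm, stack_vma, addr, /*write=*/true))
		return -ENOMEM;	/* the stack could not be expanded */

	ret = write_arg_page(stack_vma, addr);	/* hypothetical helper */
	mmap_read_unlock(mm);
	return ret;
}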
26a8f577 LS |
1718 | |
1719 | __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |
7a571499 | 1720 | { |
26a8f577 LS |
1721 | struct vm_area_struct *mpnt, *tmp; |
1722 | int retval; | |
1723 | unsigned long charge = 0; | |
1724 | LIST_HEAD(uf); | |
1725 | VMA_ITERATOR(vmi, mm, 0); | |
1726 | ||
1727 | if (mmap_write_lock_killable(oldmm)) | |
1728 | return -EINTR; | |
1729 | flush_cache_dup_mm(oldmm); | |
1730 | uprobe_dup_mmap(oldmm, mm); | |
1731 | /* | |
1732 | * Not linked in yet - no deadlock potential: | |
1733 | */ | |
1734 | mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); | |
1735 | ||
1736 | /* No ordering required: file already has been exposed. */ | |
1737 | dup_mm_exe_file(mm, oldmm); | |
1738 | ||
1739 | mm->total_vm = oldmm->total_vm; | |
1740 | mm->data_vm = oldmm->data_vm; | |
1741 | mm->exec_vm = oldmm->exec_vm; | |
1742 | mm->stack_vm = oldmm->stack_vm; | |
1743 | ||
1744 | /* Use __mt_dup() to efficiently build an identical maple tree. */ | |
1745 | retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL); | |
1746 | if (unlikely(retval)) | |
1747 | goto out; | |
1748 | ||
1749 | mt_clear_in_rcu(vmi.mas.tree); | |
1750 | for_each_vma(vmi, mpnt) { | |
1751 | struct file *file; | |
1752 | ||
1753 | vma_start_write(mpnt); | |
1754 | if (mpnt->vm_flags & VM_DONTCOPY) { | |
1755 | retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start, | |
1756 | mpnt->vm_end, GFP_KERNEL); | |
1757 | if (retval) | |
1758 | goto loop_out; | |
1759 | ||
1760 | vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); | |
1761 | continue; | |
1762 | } | |
1763 | charge = 0; | |
1764 | /* | |
1765 | * Don't duplicate many vmas if we've been oom-killed (for | |
1766 | * example) | |
1767 | */ | |
1768 | if (fatal_signal_pending(current)) { | |
1769 | retval = -EINTR; | |
1770 | goto loop_out; | |
1771 | } | |
1772 | if (mpnt->vm_flags & VM_ACCOUNT) { | |
1773 | unsigned long len = vma_pages(mpnt); | |
1774 | ||
1775 | if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ | |
1776 | goto fail_nomem; | |
1777 | charge = len; | |
1778 | } | |
1779 | ||
1780 | tmp = vm_area_dup(mpnt); | |
1781 | if (!tmp) | |
1782 | goto fail_nomem; | |
26a8f577 LS |
1783 | retval = vma_dup_policy(mpnt, tmp); |
1784 | if (retval) | |
1785 | goto fail_nomem_policy; | |
1786 | tmp->vm_mm = mm; | |
1787 | retval = dup_userfaultfd(tmp, &uf); | |
1788 | if (retval) | |
1789 | goto fail_nomem_anon_vma_fork; | |
1790 | if (tmp->vm_flags & VM_WIPEONFORK) { | |
1791 | /* | |
1792 | * VM_WIPEONFORK gets a clean slate in the child. | |
1793 | * Don't prepare anon_vma until fault since we don't | |
1794 | * copy page for current vma. | |
1795 | */ | |
1796 | tmp->anon_vma = NULL; | |
1797 | } else if (anon_vma_fork(tmp, mpnt)) | |
1798 | goto fail_nomem_anon_vma_fork; | |
1799 | vm_flags_clear(tmp, VM_LOCKED_MASK); | |
1800 | /* | |
1801 | * Copy/update hugetlb private vma information. | |
1802 | */ | |
1803 | if (is_vm_hugetlb_page(tmp)) | |
1804 | hugetlb_dup_vma_private(tmp); | |
1805 | ||
1806 | /* | |
1807 | * Link the vma into the MT. After using __mt_dup(), memory | |
1808 | * allocation is not necessary here, so it cannot fail. | |
1809 | */ | |
1810 | vma_iter_bulk_store(&vmi, tmp); | |
1811 | ||
1812 | mm->map_count++; | |
1813 | ||
1814 | if (tmp->vm_ops && tmp->vm_ops->open) | |
1815 | tmp->vm_ops->open(tmp); | |
1816 | ||
1817 | file = tmp->vm_file; | |
1818 | if (file) { | |
1819 | struct address_space *mapping = file->f_mapping; | |
1820 | ||
1821 | get_file(file); | |
1822 | i_mmap_lock_write(mapping); | |
1823 | if (vma_is_shared_maywrite(tmp)) | |
1824 | mapping_allow_writable(mapping); | |
1825 | flush_dcache_mmap_lock(mapping); | |
1826 | /* insert tmp into the share list, just after mpnt */ | |
1827 | vma_interval_tree_insert_after(tmp, mpnt, | |
1828 | &mapping->i_mmap); | |
1829 | flush_dcache_mmap_unlock(mapping); | |
1830 | i_mmap_unlock_write(mapping); | |
1831 | } | |
1832 | ||
1833 | if (!(tmp->vm_flags & VM_WIPEONFORK)) | |
1834 | retval = copy_page_range(tmp, mpnt); | |
1835 | ||
1836 | if (retval) { | |
1837 | mpnt = vma_next(&vmi); | |
1838 | goto loop_out; | |
1839 | } | |
1840 | } | |
1841 | /* a new mm has just been created */ | |
1842 | retval = arch_dup_mmap(oldmm, mm); | |
1843 | loop_out: | |
1844 | vma_iter_free(&vmi); | |
1845 | if (!retval) { | |
1846 | mt_set_in_rcu(vmi.mas.tree); | |
1847 | ksm_fork(mm, oldmm); | |
1848 | khugepaged_fork(mm, oldmm); | |
1849 | } else { | |
1850 | ||
1851 | /* | |
1852 | * The entire maple tree has already been duplicated. If the | |
1853 | * mmap duplication fails, mark the failure point with | |
1854 | * XA_ZERO_ENTRY. In exit_mmap(), once this marker is encountered, | |
1855 | * releasing stops, since the VMAs beyond this point were never | |
1856 | * duplicated. | |
1857 | */ | |
1858 | if (mpnt) { | |
1859 | mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1); | |
1860 | mas_store(&vmi.mas, XA_ZERO_ENTRY); | |
1861 | /* Avoid OOM iterating a broken tree */ | |
1862 | set_bit(MMF_OOM_SKIP, &mm->flags); | |
1863 | } | |
1864 | /* | |
1865 | * The mm_struct is going to exit, but the locks will be dropped | |
1866 | * first. Marking the mm_struct as unstable is advisable, as it | |
1867 | * is not fully initialised. | |
1868 | */ | |
1869 | set_bit(MMF_UNSTABLE, &mm->flags); | |
1870 | } | |
1871 | out: | |
1872 | mmap_write_unlock(mm); | |
1873 | flush_tlb_mm(oldmm); | |
1874 | mmap_write_unlock(oldmm); | |
1875 | if (!retval) | |
1876 | dup_userfaultfd_complete(&uf); | |
1877 | else | |
1878 | dup_userfaultfd_fail(&uf); | |
1879 | return retval; | |
1880 | ||
1881 | fail_nomem_anon_vma_fork: | |
1882 | mpol_put(vma_policy(tmp)); | |
1883 | fail_nomem_policy: | |
1884 | vm_area_free(tmp); | |
1885 | fail_nomem: | |
1886 | retval = -ENOMEM; | |
1887 | vm_unacct_memory(charge); | |
1888 | goto loop_out; | |
7a571499 | 1889 | } |
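The VM_DONTCOPY and VM_WIPEONFORK branches in dup_mmap() above are what back madvise(MADV_DONTFORK) and madvise(MADV_WIPEONFORK). A hedged userspace sketch, not part of mmap.c (MADV_WIPEONFORK needs Linux 4.14+):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);
	char *copied = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *wiped = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (copied == MAP_FAILED || wiped == MAP_FAILED)
		return 1;
	copied[0] = 'A';
	wiped[0] = 'A';
	if (madvise(wiped, len, MADV_WIPEONFORK))	/* sets VM_WIPEONFORK */
		return 1;

	if (fork() == 0) {
		/* Child: 'copied' was duplicated; 'wiped' reads as zeroes. */
		printf("child: copied=%c wiped=%d\n", copied[0], wiped[0]);
		_exit(0);
	}
	wait(NULL);
	return 0;
}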