/* mm/util.c */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
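
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * kstrdup_const() and kfree_const() for a field that is frequently
 * initialized from a string literal.  The struct and helpers below are
 * hypothetical.
 *
 *	struct demo_attr {
 *		const char *name;
 *	};
 *
 *	static int demo_attr_set_name(struct demo_attr *attr, const char *name)
 *	{
 *		attr->name = kstrdup_const(name, GFP_KERNEL);
 *		return attr->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void demo_attr_release(struct demo_attr *attr)
 *	{
 *		kfree_const(attr->name);
 *	}
 *
 * When the caller passes a literal (which lives in .rodata), no allocation
 * happens and kfree_const() is a no-op; a runtime-built string is duplicated
 * and freed normally.
 */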

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
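
/*
 * Illustrative sketch (not part of this file): kmemdup() is the idiomatic
 * replacement for an open-coded kmalloc() + memcpy() pair.  The struct and
 * helper names below are hypothetical.
 *
 *	static struct demo_config *demo_clone_config(const struct demo_config *src)
 *	{
 *		return kmemdup(src, sizeof(*src), GFP_KERNEL);
 *	}
 *
 * The caller must check for NULL and release the copy with kfree().
 */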

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
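
/*
 * Illustrative sketch (not part of this file): typical memdup_user() use in
 * a hypothetical ioctl handler.  Note the IS_ERR()/PTR_ERR() convention;
 * memdup_user() never returns NULL.
 *
 *	static long demo_ioctl_set(struct demo_dev *dev,
 *				   const void __user *arg, size_t len)
 *	{
 *		struct demo_params *p;
 *
 *		p = memdup_user(arg, len);
 *		if (IS_ERR(p))
 *			return PTR_ERR(p);
 *		...use p...
 *		kfree(p);
 *		return 0;
 *	}
 */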

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
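
/*
 * Illustrative sketch (not part of this file): copying a bounded,
 * user-supplied string.  PATH_MAX is just an example bound here.
 *
 *	char *name = strndup_user(uname, PATH_MAX);
 *
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	...use name...
 *	kfree(name);
 *
 * The result is always NUL-terminated; -EINVAL is returned when the user
 * string, including its NUL, does not fit in @n bytes.
 */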

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages,
				       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
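
/*
 * Illustrative sketch (not part of this file): pinning a user buffer for a
 * hypothetical zero-copy I/O path.  The caller must not hold mmap_sem, must
 * handle a short pin, and must release every pinned page with put_page().
 *
 *	static int demo_pin_user_buf(unsigned long uaddr, int nr_pages,
 *				     struct page **pages)
 *	{
 *		int pinned, i;
 *
 *		pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *		if (pinned < 0)
 *			return pinned;
 *		if (pinned < nr_pages) {
 *			for (i = 0; i < pinned; i++)
 *				put_page(pages[i]);
 *			return -EFAULT;
 *		}
 *		return 0;
 *	}
 */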

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
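
/*
 * Illustrative sketch (not part of this file): an in-kernel mapping request,
 * as a driver or binfmt loader might issue.  The file and size are
 * hypothetical; errors come back encoded in the returned address, so
 * IS_ERR_VALUE() is the usual check.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;
 */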

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
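
/*
 * Illustrative sketch (not part of this file): kvfree() exists so callers
 * can use the common "try kmalloc, fall back to vmalloc" pattern and free
 * the result without remembering which allocator succeeded.  The helper
 * below is hypothetical (newer kernels provide kvmalloc() for this).
 *
 *	static void *demo_kvmalloc(size_t size)
 *	{
 *		void *p;
 *
 *		p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *		if (!p)
 *			p = vmalloc(size);
 *		return p;
 *	}
 *
 * Either result may be passed to kvfree(), which dispatches on
 * is_vmalloc_addr().
 */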

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, return true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < hpage_nr_pages(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
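
/*
 * Worked example (hypothetical numbers): with overcommit_kbytes unset,
 * 4 GiB of RAM (no hugetlb pages), overcommit_ratio = 50 and 2 GiB of
 * swap, the OVERCOMMIT_NEVER limit is
 *
 *	4 GiB * 50 / 100 + 2 GiB = 4 GiB
 *
 * expressed in pages.  Setting overcommit_kbytes replaces the ratio-based
 * term with an absolute value and takes precedence over the ratio.
 */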

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long free, allowed, reserve;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_node_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_node_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure. The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	down_read(&mm->mmap_sem);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	up_read(&mm->mmap_sem);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
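
/*
 * Illustrative sketch (not part of this file): how a caller such as a
 * hypothetical /proc seq_file reader might use get_cmdline().  The buffer
 * size is an arbitrary example; the result is not guaranteed to be
 * NUL-terminated, so the returned length must be used.
 *
 *	char buf[256];
 *	int n;
 *
 *	n = get_cmdline(task, buf, sizeof(buf));
 *	seq_write(m, buf, n);
 */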