// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	if (vma->vm_file)
		return NULL;

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing. The caller
 * should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	pgoff_t pgoff;

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_name);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vma->vm_flags = new_flags;
	if (!vma->vm_file) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations. Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain();	/* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);

	if (fatal_signal_pending(current))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it: i.e. if we are the page's only
		 * owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a page to accelerate its reclaim.
		 * The VM can't reclaim the page unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the page's recent reference history.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Page out the pagecache only for non-anonymous mappings that
	 * correspond to files the calling process could (if it tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!can_do_pageout(vma))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent the swap-in, which is more expensive
		 * than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we can't clear
			 * its PG_dirty flag.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at and tlb_remove_tlb_entry, so for
			 * portability, remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry = madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}
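
/*
 * Illustrative sketch (not part of the kernel source): seen from userspace,
 * the difference between the two behaviors dispatched below is that
 * MADV_DONTNEED drops the pages immediately, while MADV_FREE only marks them
 * lazily freeable, so their contents survive until memory pressure actually
 * reclaims them. A hypothetical allocator returning a freed arena to the
 * kernel might therefore do:
 *
 *	#include <sys/mman.h>
 *
 *	// arena/arena_len are placeholders for a private anonymous mapping.
 *	if (madvise(arena, arena_len, MADV_FREE) != 0)
 *		madvise(arena, arena_len, MADV_DONTNEED);
 *
 * falling back to MADV_DONTNEED on kernels without MADV_FREE support.
 */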

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	*end = ALIGN(*end, huge_page_size(hstate_vma(vma)));
	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		/*
		 * Potential end adjustment for a hugetlb vma is OK as
		 * the check below keeps end within the vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released, the concurrent operation must not
			 * leave madvise() with an undefined result.
			 * There may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will generate
			 * an UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

static long madvise_populate(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end,
			     int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long tmp_end;
	int locked = 1;
	long pages;

	*prev = vma;

	while (start < end) {
		/*
		 * We might have temporarily dropped the lock. For example,
		 * our VMA might have been split.
		 */
		if (!vma || start >= vma->vm_end) {
			vma = vma_lookup(mm, start);
			if (!vma)
				return -ENOMEM;
		}

		tmp_end = min_t(unsigned long, end, vma->vm_end);
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

/*
 * Apply a madvise behavior to a region of a vma.  madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return madvise_populate(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool
process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range.  Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file)
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */
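
/*
 * Illustrative sketch (not part of the kernel source): userspace reaches
 * madvise_set_anon_name() above through prctl(2), not madvise(2). On kernels
 * built with CONFIG_ANON_VMA_NAME, labelling a private anonymous mapping so
 * it shows up in /proc/<pid>/maps might look like:
 *
 *	#include <sys/prctl.h>
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *	      (unsigned long)p, len, (unsigned long)"my arena");
 *
 * where len and the "my arena" label are hypothetical placeholders.
 */
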
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory
 *		range were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required.
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end;
	int error;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return -EINVAL;

	if (!PAGE_ALIGNED(start))
		return -EINVAL;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	blk_start_plug(&plug);
	error = madvise_walk_vmas(mm, start, end, behavior,
			madvise_vma_behavior);
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}
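
/*
 * Illustrative sketch (not part of the kernel source): a typical userspace
 * caller of the syscall defined above, hinting that a just-mapped file will
 * be read sequentially. The descriptor fd and length len are hypothetical
 * placeholders:
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED &&
 *	    madvise(buf, len, MADV_SEQUENTIAL) != 0)
 *		perror("madvise");	// advisory only: safe to ignore
 */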

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV], iovec;
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		iovec = iov_iter_iovec(&iter);
		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
					iovec.iov_len, behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iovec.iov_len);
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
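
/*
 * Illustrative sketch (not part of the kernel source): process_madvise(2)
 * lets a suitably privileged manager process apply the hints accepted by
 * process_madvise_behavior_valid() to another process. Using raw syscalls
 * (libc wrappers may not exist), paging out a range of a target process
 * might look like:
 *
 *	#include <sys/syscall.h>
 *	#include <sys/mman.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct iovec iov = { .iov_base = target_addr, .iov_len = target_len };
 *	ssize_t n = syscall(SYS_process_madvise, pidfd, &iov, 1,
 *			    MADV_PAGEOUT, 0);
 *
 * where target_pid, target_addr and target_len are hypothetical values
 * (e.g. parsed from /proc/<pid>/maps); n is the number of bytes advised.
 * As enforced above, the caller needs PTRACE_MODE_READ on the target and
 * CAP_SYS_NICE.
 */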