// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <linux/memory-tiers.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"

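/*
 * Return true if @pte in @vma can be made writable right away, i.e. without
 * forcing a write fault.  These are the same conditions the write-fault
 * handler would check before upgrading the PTE to writable.
 */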
bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable. */
	if (pte_protnone(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/*
		 * Writable MAP_PRIVATE mapping: We can only special-case on
		 * exclusive anonymous pages, because we know that our
		 * write-fault handler similarly would map them writable without
		 * any additional checks while holding the PT lock.
		 */
		page = vm_normal_page(vma, addr, pte);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/*
	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
	 * needs a real write-fault for writenotify
	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
	 * FS was already notified and we can simply mark the PTE writable
	 * just like the write-fault handler would do.
	 */
	return pte_dirty(pte);
}

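/*
 * Walk the ptes mapped by @pmd in [@addr, @end) and apply @newprot plus the
 * uffd-wp / NUMA-hinting adjustments requested in @cp_flags.  Returns the
 * number of page table entries that were updated.
 */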
static long change_pte_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	long pages = 0;
	int target_node = NUMA_NO_NODE;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, PAGE_SIZE);

	/*
	 * Can be called with only the mmap_lock for reading by
	 * prot_numa so we must check the pmd isn't constantly
	 * changing from under us from pmd_none to pmd_trans_huge
	 * and/or the other way around.
	 */
	if (pmd_trans_unstable(pmd))
		return 0;

	/*
	 * The pmd points to a regular pte so the pmd can't change
	 * from under us even if the mmap_lock is only held for
	 * reading.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

	/* Get target node for single threaded private VMAs */
	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
	    atomic_read(&vma->vm_mm->mm_users) == 1)
		target_node = numa_node_id();

	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;
				int nid;
				bool toptier;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || is_zone_device_page(page) || PageKsm(page))
					continue;

				/* Also skip shared copy-on-write pages */
				if (is_cow_mapping(vma->vm_flags) &&
				    page_count(page) != 1)
					continue;

				/*
				 * While migration can move some dirty pages,
				 * it cannot move them all from MIGRATE_ASYNC
				 * context.
				 */
				if (page_is_file_lru(page) && PageDirty(page))
					continue;

				/*
				 * Don't mess with PTEs if page is already on the node
				 * a single-threaded process is running on.
				 */
				nid = page_to_nid(page);
				if (target_node == nid)
					continue;
				toptier = node_is_toptier(nid);

				/*
				 * Skip scanning top tier node if normal numa
				 * balancing is disabled
				 */
				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
				    toptier)
					continue;
				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
				    !toptier)
					xchg_page_access_time(page,
						jiffies_to_msecs(jiffies));
			}

			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);

			if (uffd_wp)
				ptent = pte_mkuffd_wp(ptent);
			else if (uffd_wp_resolve)
				ptent = pte_clear_uffd_wp(ptent);

			/*
			 * In some writable, shared mappings, we might want
			 * to catch actual write access -- see
			 * vma_wants_writenotify().
			 *
			 * In all writable, private mappings, we have to
			 * properly handle COW.
			 *
			 * In both cases, we can sometimes still change PTEs
			 * writable and avoid the write-fault handler, for
			 * example, if a PTE is already dirty and no other
			 * COW or special handling is required.
			 */
			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
			    !pte_write(ptent) &&
			    can_change_pte_writable(vma, addr, ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			if (pte_needs_flush(oldpte, ptent))
				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
			pages++;
		} else if (is_swap_pte(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);
			pte_t newpte;

			if (is_writable_migration_entry(entry)) {
				struct page *page = pfn_swap_entry_to_page(entry);

				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
							     swp_offset(entry));
				else
					entry = make_readable_migration_entry(swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
			} else if (is_writable_device_private_entry(entry)) {
				/*
				 * We do not preserve soft-dirtiness. See
				 * copy_one_pte() for explanation.
				 */
				entry = make_readable_device_private_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_writable_device_exclusive_entry(entry)) {
				entry = make_readable_device_exclusive_entry(
							swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				if (pte_swp_uffd_wp(oldpte))
					newpte = pte_swp_mkuffd_wp(newpte);
			} else if (is_pte_marker_entry(entry)) {
				/*
				 * Ignore swapin errors unconditionally,
				 * because any access should sigbus anyway.
				 */
				if (is_swapin_error_entry(entry))
					continue;
				/*
				 * If this is a uffd-wp pte marker and we'd like
				 * to unprotect it, drop it; the next page
				 * fault will trigger without uffd trapping.
				 */
				if (uffd_wp_resolve) {
					pte_clear(vma->vm_mm, addr, pte);
					pages++;
				}
				continue;
			} else {
				newpte = oldpte;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);

			if (!pte_same(oldpte, newpte)) {
				set_pte_at(vma->vm_mm, addr, pte, newpte);
				pages++;
			}
		} else {
			/* It must be a none pte; what else could it be? */
			WARN_ON_ONCE(!pte_none(oldpte));

			/*
			 * Nobody plays with any none ptes besides
			 * userfaultfd when applying the protections.
			 */
			if (likely(!uffd_wp))
				continue;

			if (userfaultfd_wp_use_markers(vma)) {
				/*
				 * For file-backed mem, we need to be able to
				 * wr-protect a none pte, because even if the
				 * pte is none, the page/swap cache could
				 * exist.  Do that by installing a marker.
				 */
				set_pte_at(vma->vm_mm, addr, pte,
					   make_pte_marker(PTE_MARKER_UFFD_WP));
				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

/*
 * Used when setting automatic NUMA hinting protection where it is
 * critical that a numa hinting PMD is not confused with a bad PMD.
 */
static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
{
	pmd_t pmdval = pmdp_get_lockless(pmd);

	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();
#endif

	if (pmd_none(pmdval))
		return 1;
	if (pmd_trans_huge(pmdval))
		return 0;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}

	return 0;
}

/*
 * Return true if we want to split THPs into PTE mappings in change
 * protection procedure, false otherwise.
 */
static inline bool
pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
	/*
	 * pte markers only reside at the pte level; if we need pte markers,
	 * we need to split.  We cannot wr-protect shmem thp because file
	 * thp is handled differently when split by erasing the pmd so far.
	 */
	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
}

/*
 * Return true if we want to populate pgtables in change protection
 * procedure, false otherwise.
 */
static inline bool
pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
{
	/* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */
	if (!(cp_flags & MM_CP_UFFD_WP))
		return false;

	/* Populate if the userfaultfd mode requires pte markers */
	return userfaultfd_wp_use_markers(vma);
}

/*
 * Populate the pgtable underneath for whatever reason if requested.
 * When {pte|pmd|...}_alloc() fails we treat it the same way as pgtable
 * allocation failures during page faults, by kicking OOM and returning
 * error.
 */
#define change_pmd_prepare(vma, pmd, cp_flags)				\
	({								\
		long err = 0;						\
		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
			if (pte_alloc(vma->vm_mm, pmd))			\
				err = -ENOMEM;				\
		}							\
		err;							\
	})

/*
 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to
 * have separate change_pmd_prepare() because pte_alloc() returns 0 on success,
 * while {pmd|pud|p4d}_alloc() returns the valid pointer on success.
 */
#define change_prepare(vma, high, low, addr, cp_flags)			\
	({								\
		long err = 0;						\
		if (unlikely(pgtable_populate_needed(vma, cp_flags))) {	\
			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
			if (p == NULL)					\
				err = -ENOMEM;				\
		}							\
		err;							\
	})

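/*
 * Walk the pmds under @pud in [@addr, @end), splitting or updating huge pmds
 * as needed and descending into change_pte_range() for the rest.  Returns the
 * number of pages updated, or a negative error if pgtable population for
 * userfaultfd-wp fails.
 */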
static inline long change_pmd_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pmd_t *pmd;
	unsigned long next;
	long pages = 0;
	unsigned long nr_huge_updates = 0;
	struct mmu_notifier_range range;

	range.start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		long ret;

		next = pmd_addr_end(addr, end);

		ret = change_pmd_prepare(vma, pmd, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}
		/*
		 * Automatic NUMA balancing walks the tables with mmap_lock
		 * held for read. It's possible for a parallel update to occur
		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
		 * check leading to a false positive and clearing.
		 * Hence, it's necessary to atomically read the PMD value
		 * for all the checks.
		 */
		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
		    pmd_none_or_clear_bad_unless_trans_huge(pmd))
			goto next;

		/* invoke the mmu notifier if the pmd is populated */
		if (!range.start) {
			mmu_notifier_range_init(&range,
				MMU_NOTIFY_PROTECTION_VMA, 0,
				vma->vm_mm, addr, end);
			mmu_notifier_invalidate_range_start(&range);
		}

		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if ((next - addr != HPAGE_PMD_SIZE) ||
			    pgtable_split_needed(vma, cp_flags)) {
				__split_huge_pmd(vma, pmd, addr, false, NULL);
				/*
				 * For file-backed, the pmd could have been
				 * cleared; make sure pmd populated if
				 * necessary, then fall-through to pte level.
				 */
				ret = change_pmd_prepare(vma, pmd, cp_flags);
				if (ret) {
					pages = ret;
					break;
				}
			} else {
				/*
				 * change_huge_pmd() does not defer TLB flushes,
				 * so no need to propagate the tlb argument.
				 */
				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
						addr, newprot, cp_flags);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					goto next;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		pages += change_pte_range(tlb, vma, pmd, addr, next,
					  newprot, cp_flags);
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	if (range.start)
		mmu_notifier_invalidate_range_end(&range);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

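/*
 * Walk the puds under @p4d in [@addr, @end) and apply the protection change
 * to each populated entry via change_pmd_range().  Returns the number of
 * pages updated or a negative error.
 */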
static inline long change_pud_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	pud_t *pud;
	unsigned long next;
	long pages = 0, ret;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		ret = change_prepare(vma, pud, pmd, addr, cp_flags);
		if (ret)
			return ret;
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
					  cp_flags);
	} while (pud++, addr = next, addr != end);

	return pages;
}

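/*
 * Same as change_pud_range(), one level up: walk the p4ds under @pgd and
 * descend into change_pud_range() for each populated entry.
 */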
static inline long change_p4d_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	p4d_t *p4d;
	unsigned long next;
	long pages = 0, ret;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		ret = change_prepare(vma, p4d, pud, addr, cp_flags);
		if (ret)
			return ret;
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
					  cp_flags);
	} while (p4d++, addr = next, addr != end);

	return pages;
}

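/*
 * Top-level page table walk for change_protection(): iterate the pgd entries
 * covering [@addr, @end) within a single VMA and apply @newprot.  Returns the
 * number of pages updated or a negative error.
 */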
static long change_protection_range(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long addr,
		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	long pages = 0, ret;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	tlb_start_vma(tlb, vma);
	do {
		next = pgd_addr_end(addr, end);
		ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
		if (ret) {
			pages = ret;
			break;
		}
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
					  cp_flags);
	} while (pgd++, addr = next, addr != end);

	tlb_end_vma(tlb, vma);

	return pages;
}

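/*
 * Change the protection of all pages in [@start, @end) of @vma according to
 * vma->vm_page_prot (or PAGE_NONE for NUMA-hinting updates) and @cp_flags,
 * handing hugetlb VMAs off to hugetlb_change_protection().  Returns the
 * number of pages updated or a negative error.
 */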
long change_protection(struct mmu_gather *tlb,
		       struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, unsigned long cp_flags)
{
	pgprot_t newprot = vma->vm_page_prot;
	long pages;

	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);

#ifdef CONFIG_NUMA_BALANCING
	/*
	 * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking)
	 * are expected to reflect their requirements via VMA flags such that
	 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
	 */
	if (cp_flags & MM_CP_PROT_NUMA)
		newprot = PAGE_NONE;
#else
	WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA);
#endif

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot,
						  cp_flags);
	else
		pages = change_protection_range(tlb, vma, start, end, newprot,
						cp_flags);

	return pages;
}

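/*
 * Page-walk callbacks used by mprotect_fixup() when an architecture needs to
 * validate that a PFN mapping may be turned into PROT_NONE (see
 * arch_has_pfn_modify_check() and pfn_modify_allowed()).
 */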
static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
			       unsigned long next, struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
				   unsigned long addr, unsigned long next,
				   struct mm_walk *walk)
{
	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
		0 : -EACCES;
}

static int prot_none_test(unsigned long addr, unsigned long next,
			  struct mm_walk *walk)
{
	return 0;
}

static const struct mm_walk_ops prot_none_walk_ops = {
	.pte_entry		= prot_none_pte_entry,
	.hugetlb_entry		= prot_none_hugetlb_entry,
	.test_walk		= prot_none_test,
};

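/*
 * Apply @newflags to the range [@start, @end) of @vma: merge or split VMAs as
 * needed, update vm_flags and vm_page_prot, and rewrite the affected page
 * tables.  *@pprev is set to the resulting VMA on return.  Returns 0 on
 * success or a negative error.
 */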
int
mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
	       struct vm_area_struct *vma, struct vm_area_struct **pprev,
	       unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned int mm_cp_flags = 0;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * Do PROT_NONE PFN permission checks here when we can still
	 * bail out without undoing a lot of state. This is a rather
	 * uncommon case, so doesn't need to be very optimized.
	 */
	if (arch_has_pfn_modify_check() &&
	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
	    (newflags & VM_ACCESS_FLAGS) == 0) {
		pgprot_t new_pgprot = vm_get_page_prot(newflags);

		error = walk_page_range(current->mm, start, end,
				&prot_none_walk_ops, &new_pgprot);
		if (error)
			return error;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.  hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
		    may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(vmi, mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(vmi, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(vmi, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_lock
	 * held in write mode.
	 */
	vm_flags_reset(vma, newflags);
	if (vma_wants_manual_pte_write_upgrade(vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, mm_cp_flags);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey == -1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);
	struct mmu_gather tlb;
	struct vma_iterator vmi;

	start = untagged_addr(start);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot, start))
		return -EINVAL;

	reqprot = prot;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma_iter_init(&vmi, current->mm, start);
	vma = vma_find(&vmi, end);
	error = -ENOMEM;
	if (!vma)
		goto out;

	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	tlb_gather_mmu(&tlb, current->mm);
	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		if (vma->vm_start != tmp) {
			error = -ENOMEM;
			break;
		}

		/* Does the application expect PROT_READ to imply PROT_EXEC */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
			error = -EACCES;
			break;
		}

		if (map_deny_write_exec(vma, newflags)) {
			error = -EACCES;
			break;
		}

		/* Allow architectures to sanity-check the new flags */
		if (!arch_validate_flags(newflags)) {
			error = -EINVAL;
			break;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			break;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;

		if (vma->vm_ops && vma->vm_ops->mprotect) {
			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
			if (error)
				break;
		}

		error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
		if (error)
			break;

		tmp = vma_iter_end(&vmi);
		nstart = tmp;
		prot = reqprot;
	}
	tlb_finish_mmu(&tlb);

	if (!error && vma_iter_end(&vmi) < end)
		error = -ENOMEM;

out:
	mmap_write_unlock(current->mm);
	return error;
}
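/*
 * Illustrative userspace use of the syscall below (not kernel code):
 * temporarily write-protect one page, then restore it:
 *
 *	mprotect(addr, page_size, PROT_READ);
 *	...
 *	mprotect(addr, page_size, PROT_READ | PROT_WRITE);
 */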
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

#ifdef CONFIG_ARCH_HAS_PKEYS

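/*
 * Illustrative userspace flow for the pkey syscalls below (not kernel code):
 * allocate a key, attach it to a mapping, and release it:
 *
 *	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *	...
 *	pkey_free(pkey);
 */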
SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	mmap_write_lock(current->mm);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	mmap_write_unlock(current->mm);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	mmap_write_lock(current->mm);
	ret = mm_pkey_free(current->mm, pkey);
	mmap_write_unlock(current->mm);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}

#endif /* CONFIG_ARCH_HAS_PKEYS */