// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

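/*
 * Per-CPU batch of folios whose mlock/munlock state still has to be applied
 * to the LRU, protected by a local lock; it is drained by mlock_folio_batch()
 * below.
 */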
struct mlock_fbatch {
	local_lock_t lock;
	struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 * list exists), rather than the [in]active lists. PG_unevictable is set to
 * indicate the unevictable state.
 */

static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!folio_test_clear_lru(folio))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (unlikely(folio_evictable(folio))) {
		/*
		 * This is a little surprising, but quite possible: PG_mlocked
		 * must have got cleared already by another CPU.  Could this
		 * folio be unevictable?  I'm not sure, but move it now if so.
		 */
		if (folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);

			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  folio_nr_pages(folio));
		}
		goto out;
	}

	if (folio_test_unevictable(folio)) {
		if (folio_test_mlocked(folio))
			folio->mlock_count++;
		goto out;
	}

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	lruvec_add_folio(lruvec, folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	folio_set_lru(folio);
	return lruvec;
}

static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(folio_evictable(folio)))
		goto out;

	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	lruvec_add_folio(lruvec, folio);
	folio_set_lru(folio);
	return lruvec;
}

static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	int nr_pages = folio_nr_pages(folio);
	bool isolated = false;

	if (!folio_test_clear_lru(folio))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (folio_test_unevictable(folio)) {
		/* Then mlock_count is maintained, but might undercount */
		if (folio->mlock_count)
			folio->mlock_count--;
		if (folio->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (folio_test_clear_mlocked(folio)) {
		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		if (isolated || !folio_test_unevictable(folio))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* folio_evictable() has to be checked *after* clearing Mlocked */
	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_unevictable(folio);
		lruvec_add_folio(lruvec, folio);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		folio_set_lru(folio);
	return lruvec;
}

/*
 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
 */
#define LRU_FOLIO 0x1
#define NEW_FOLIO 0x2
static inline struct folio *mlock_lru(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
}

static inline struct folio *mlock_new(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
}

/*
 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
 * make use of such folio pointer flags in future, but for now just keep it for
 * mlock.  We could use three separate folio batches instead, but one feels
 * better (munlocking a full folio batch does not need to drain mlocking folio
 * batches first).
 */
static void mlock_folio_batch(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct folio *folio;
	int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		folio = fbatch->folios[i];
		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
		folio = (struct folio *)((unsigned long)folio - mlock);
		fbatch->folios[i] = folio;

		if (mlock & LRU_FOLIO)
			lruvec = __mlock_folio(folio, lruvec);
		else if (mlock & NEW_FOLIO)
			lruvec = __mlock_new_folio(folio, lruvec);
		else
			lruvec = __munlock_folio(folio, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_reinit(fbatch);
}

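/*
 * mlock_drain_local() applies this CPU's pending batch; mlock_drain_remote()
 * does the same for a CPU that is no longer online; need_mlock_drain()
 * reports whether a CPU has anything batched.
 */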
void mlock_drain_local(void)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

void mlock_drain_remote(int cpu)
{
	struct folio_batch *fbatch;

	WARN_ON_ONCE(cpu_online(cpu));
	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
}

bool need_mlock_drain(int cpu)
{
	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int nr_pages = folio_nr_pages(folio);

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	folio_set_mlocked(folio);

	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	/*
	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
	 * which will check whether the folio is multiply mlocked.
	 */
	folio_get(folio);
	if (!folio_batch_add(fbatch, folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

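/*
 * folio_mlock_step - how many present PTEs, starting at @pte, still map pages
 * of @folio within [@addr, @end); returns 1 for a small folio.
 */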
static inline unsigned int folio_mlock_step(struct folio *folio,
		pte_t *pte, unsigned long addr, unsigned long end)
{
	unsigned int count, i, nr = folio_nr_pages(folio);
	unsigned long pfn = folio_pfn(folio);
	pte_t ptent = ptep_get(pte);

	if (!folio_test_large(folio))
		return 1;

	count = pfn + nr - pte_pfn(ptent);
	count = min_t(unsigned int, count, (end - addr) >> PAGE_SHIFT);

	for (i = 0; i < count; i++, pte++) {
		pte_t entry = ptep_get(pte);

		if (!pte_present(entry))
			break;
		if (pte_pfn(entry) - pfn >= nr)
			break;
	}

	return i;
}

static inline bool allow_mlock_munlock(struct folio *folio,
		struct vm_area_struct *vma, unsigned long start,
		unsigned long end, unsigned int step)
{
	/*
	 * For munlock, allow munlocking a large folio that is only partially
	 * mapped into the VMA: the folio may have been mlocked before the
	 * VMA was split.
	 *
	 * Under memory pressure such a large folio can be split, and the
	 * pages that are not in a VM_LOCKED VMA can then be reclaimed.
	 */
	if (!(vma->vm_flags & VM_LOCKED))
		return true;

	/* folio_within_range() cannot take KSM, but any small folio is OK */
	if (!folio_test_large(folio))
		return true;

	/* folio not in range [start, end), skip mlock */
	if (!folio_within_range(folio, vma, start, end))
		return false;

	/* folio is not fully mapped, skip mlock */
	if (step != folio_nr_pages(folio))
		return false;

	return true;
}

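/*
 * mlock_pte_range - mlock or munlock (according to VM_LOCKED in vma->vm_flags)
 * each folio mapped by the PMD or by the PTEs in this range of the page walk.
 */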
static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)

{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	pte_t ptent;
	struct folio *folio;
	unsigned int step = 1;
	unsigned long start = addr;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		folio = page_folio(pmd_page(*pmd));
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		step = folio_mlock_step(folio, pte, addr, end);
		if (!allow_mlock_munlock(folio, vma, start, end, step))
			goto next_entry;

		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);

next_entry:
		pte += step - 1;
		addr += (step - 1) << PAGE_SHIFT;
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 * or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
		.walk_lock = PGWALK_WRLOCK_VERIFY,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_folio() and raise page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_folio() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	vma_start_write(vma);
	vm_flags_reset_once(vma, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		vm_flags_reset_once(vma, newflags);
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       struct vm_area_struct **prev, unsigned long start,
	       unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */
	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vma_start_write(vma);
		vm_flags_reset(vma, newflags);
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, current->mm, start);

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = vma_iter_load(&vmi);
	if (!vma)
		return -ENOMEM;

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		int error;
		vm_flags_t newflags;

		if (vma->vm_start != tmp)
			return -ENOMEM;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= flags;
		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
		if (error)
			return error;
		tmp = vma_iter_end(&vmi);
		nstart = tmp;
	}

	if (tmp < end)
		return -ENOMEM;

	return 0;
}

/*
 * Go through the VMAs and sum up the size of already-mlocked pages in the
 * range, returned as a page count.  Note that the deferred memory locking
 * case (mlock2(,,MLOCK_ONFAULT)) is also counted.
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	VMA_ITERATOR(vmi, mm, start);

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * The requested range may intersect previously mlocked
		 * areas; those pages are already counted in "mm->locked_vm"
		 * and should not be counted again, so check and adjust the
		 * locked count if necessary.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
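
/*
 * Illustrative userspace sketch (addr and length are caller-supplied, not
 * defined here): with MLOCK_ONFAULT the pages are locked as they are first
 * faulted in, instead of being populated immediately:
 *
 *	if (mlock2(addr, length, MLOCK_ONFAULT))
 *		perror("mlock2");
 */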

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
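/*
 * For example: mlockall(MCL_CURRENT | MCL_ONFAULT) ORs VM_LOCKED and
 * VM_LOCKONFAULT into every existing VMA but leaves mm->def_flags alone,
 * while adding MCL_FUTURE would also set those bits in mm->def_flags so
 * that future mappings start out locked.
 */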
static int apply_mlockall_flags(int flags)
{
	VMA_ITERATOR(vmi, current->mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= ~VM_LOCKED_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for_each_vma(vmi, vma) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
			    newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with a different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}