/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
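
/*
 * Note: CAP_IPC_LOCK bypasses RLIMIT_MEMLOCK entirely, while a nonzero
 * rlimit merely permits the attempt -- the actual accounting of locked
 * pages against the limit happens in the callers, e.g. sys_mlock() below.
 */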
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
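
/*
 * Illustrative sketch (not code from this file): the lazy-mlock re-check
 * in mm/rmap.c takes the mmap_sem for read and re-tests VM_LOCKED before
 * fixing up the page, roughly:
 *
 *	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 *		if (vma->vm_flags & VM_LOCKED)
 *			mlock_vma_page(page);
 *		up_read(&vma->vm_mm->mmap_sem);
 *	}
 *
 * If the trylock fails, vmscan leaves the page alone and retries later.
 */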
/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/* Page not on the LRU yet.  Flush all pagevecs and retry. */
		lru_add_drain_all();
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page) && !isolate_lru_page(page))
		putback_lru_page(page);
}
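
/*
 * Note on the test-and-set above: TestSetPageMlocked() atomically sets
 * PG_mlocked and returns its old value, so only the first locker tries
 * to isolate the page; putback_lru_page() then routes the now-mlocked
 * page to the unevictable LRU list.
 */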
/*
 * called from munlock()/munmap() path with page supposedly on the LRU.
 *
 * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
		try_to_munlock(page);
		putback_lru_page(page);
	}
}
/*
 * mlock a range of pages in the vma.
 *
 * This takes care of making the pages present too.
 *
 * vma->vm_mm->mmap_sem must be held for write.
 */
static int __mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16];		/* 16 gives a reasonable batch */
	int write = !!(vma->vm_flags & VM_WRITE);
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret;

	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start || end > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));

	lru_add_drain_all();	/* push cached pages to LRU */

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages() makes pages present if we are
		 * setting mlock, and the extra reference count will
		 * disable migration of the page.  However, the page may
		 * still be truncated out from under us.
		 */
		ret = get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				write, 0, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;
		if (ret == 0) {
			/*
			 * We know the vma is there, so the only time
			 * we cannot get a single page should be an
			 * error (ret < 0) case.
			 */
			WARN_ON(1);
			break;
		}

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			lock_page(page);
			/*
			 * Because we lock page here and migration is blocked
			 * by the elevated reference, we need only check for
			 * page truncation (file-cache only).
			 */
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
			put_page(page);		/* ref from get_user_pages() */

			/*
			 * here we assume that get_user_pages() has given us
			 * a list of virtually contiguous pages.
			 */
			addr += PAGE_SIZE;	/* for next get_user_pages() */
			nr_pages--;
		}
	}

	lru_add_drain_all();	/* to update stats */

	return 0;	/* count entire vma as locked_vm */
}
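
/*
 * Illustrative arithmetic for the batching above (example numbers, not
 * from the original source): with 4KB pages, mlocking a 1MB range covers
 * 256 pages, so the loop issues 256/16 = 16 get_user_pages() calls, each
 * filling the 16-entry pages[] array and faulting pages in as needed.
 */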
/*
 * private structure for munlock page table walk
 */
struct munlock_page_walk {
	struct vm_area_struct *vma;
	pmd_t *pmd;		/* for migration_entry_wait() */
};
/*
 * munlock normal pages for present ptes
 */
static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct munlock_page_walk *mpw = walk->private;
	swp_entry_t entry;
	struct page *page;
	pte_t pte;

retry:
	pte = *ptep;
	/*
	 * If it's a swap pte, we might be racing with page migration.
	 */
	if (unlikely(!pte_present(pte))) {
		if (!is_swap_pte(pte))
			goto out;
		entry = pte_to_swp_entry(pte);
		if (is_migration_entry(entry)) {
			migration_entry_wait(mpw->vma->vm_mm, mpw->pmd, addr);
			goto retry;
		}
		goto out;
	}

	page = vm_normal_page(mpw->vma, addr, pte);
	if (!page)
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		goto retry;
	}
	munlock_vma_page(page);
	unlock_page(page);

out:
	return 0;
}
/*
 * Save pmd for pte handler for waiting on migration entries
 */
static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct munlock_page_walk *mpw = walk->private;

	mpw->pmd = pmd;
	return 0;
}
/*
 * munlock a range of pages in the vma using standard page table walk.
 *
 * vma->vm_mm->mmap_sem must be held for write.
 */
static void __munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct munlock_page_walk mpw = {
		.vma = vma,
	};
	struct mm_walk munlock_page_walk = {
		.pmd_entry = __munlock_pmd_handler,
		.pte_entry = __munlock_pte_handler,
		.private = &mpw,
		.mm = mm,
	};

	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);

	lru_add_drain_all();	/* push cached pages to LRU */
	walk_page_range(start, end, &munlock_page_walk);
	lru_add_drain_all();	/* to update stats */
}
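
/*
 * Note on the walk above (assumption about walk_page_range() ordering):
 * for each pmd in [start, end) the walker calls ->pmd_entry first and
 * then ->pte_entry for every pte beneath it, so __munlock_pmd_handler()
 * has always cached the pmd that __munlock_pte_handler() may hand to
 * migration_entry_wait().
 */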
#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static int __mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_LOCKED)
		make_pages_present(start, end);
	return 0;
}

/*
 * munlock a range of pages in the vma -- no-op.
 */
static void __munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}

#endif /* CONFIG_UNEVICTABLE_LRU */
/*
 * mlock all pages in this vma range.  For mmap()/mremap()/...
 */
int mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)))
		return __mlock_vma_pages_range(vma, start, end);

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* pages NOT mlocked */
}
/*
 * munlock all pages in vma.  For munmap() and exit().
 */
void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_LOCKED;
	__munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}
/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	vma->vm_flags = newflags;

	if (lock) {
		ret = __mlock_vma_pages_range(vma, start, end);
		if (ret > 0) {
			mm->locked_vm -= ret;
			ret = 0;
		}
	} else
		__munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}
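
/*
 * Worked example (illustrative): mlocking only the middle page of a
 * 3-page vma usually cannot vma_merge(), so split_vma() runs twice --
 * once at 'start', once at 'end' -- leaving three vmas; only the middle
 * one gets VM_LOCKED and contributes one page to mm->locked_vm.
 */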
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
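
/*
 * Userspace view of the rounding above (illustrative): with 4KB pages,
 *
 *	mlock((void *)0x1234, 100);
 *
 * rounds start down to 0x1000 and len up to one page, locking the whole
 * page [0x1000, 0x2000); a munlock() with the same arguments unlocks
 * that same page-aligned range.
 */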
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
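
/*
 * Semantics recap (illustrative): mlockall(MCL_CURRENT | MCL_FUTURE)
 * locks every existing vma through do_mlockall() and sets mm->def_flags
 * to VM_LOCKED so future mappings are born locked; munlockall() undoes
 * both via do_mlockall(0).
 */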
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
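
/*
 * Rounding example for the accounting above (illustrative): with 4KB
 * pages, user_shm_lock(10000, user) charges (10000 + 4095) >> PAGE_SHIFT
 * = 3 pages to user->locked_shm; the matching user_shm_unlock() must be
 * passed the same size so the same 3 pages are subtracted.
 */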