/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked. So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void clear_page_mlock(struct page *page)
{
	if (!TestClearPageMlocked(page))
		return;

	mod_zone_page_state(page_zone(page), NR_MLOCK,
			    -hpage_nr_pages(page));
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race. The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/*
 * Finish munlock after successful page isolation
 *
 * Page must be locked. This is a wrapper for try_to_munlock()
 * and putback_lru_page() with munlock accounting.
 */
static void __munlock_isolated_page(struct page *page)
{
	int ret = SWAP_AGAIN;

	/*
	 * Optimization: if the page was mapped just once, that's our mapping
	 * and we don't need to check all the other vmas.
	 */
	if (page_mapcount(page) > 1)
		ret = try_to_munlock(page);

	/* Did try_to_munlock() succeed or punt? */
	if (ret != SWAP_MLOCK)
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);

	putback_lru_page(page);
}

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
	if (PageUnevictable(page))
		count_vm_event(UNEVICTABLE_PGSTRANDED);
	else
		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}

/**
 * munlock_vma_page - munlock a vma page
 * @page - page to be unlocked, either a normal page or THP page head
 *
 * returns the size of the page as a page mask (0 for normal page,
 *         HPAGE_PMD_NR - 1 for THP head page)
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
unsigned int munlock_vma_page(struct page *page)
{
	unsigned int nr_pages;

	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		nr_pages = hpage_nr_pages(page);
		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
		if (!isolate_lru_page(page))
			__munlock_isolated_page(page);
		else
			__munlock_isolation_failed(page);
	} else {
		nr_pages = hpage_nr_pages(page);
	}

	/*
	 * Regardless of the original PageMlocked flag, we determine nr_pages
	 * after touching the flag. This leaves a possible race with a THP page
	 * split, such that a whole THP page was munlocked, but nr_pages == 1.
	 * Returning a smaller mask due to that is OK, the worst that can
	 * happen is subsequent useless scanning of the former tail pages.
	 * The NR_MLOCK accounting can however become broken.
	 */
	return nr_pages - 1;
}

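/*
 * Usage sketch: a caller converts the returned mask back into a page count
 * to know how far to advance its scan, roughly as munlock_vma_pages_range()
 * below does:
 *
 *	page_mask = munlock_vma_page(page);
 *	page_increm = 1 + page_mask;
 *	start += page_increm * PAGE_SIZE;
 */
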
/**
 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

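/*
 * Illustrative note: get_user_pages() reports an unmapped address as -EFAULT
 * and a failure to allocate/lock as -ENOMEM, while POSIX wants mlock() to
 * report an unmapped range as ENOMEM and a transient inability to lock as
 * EAGAIN, hence the swap above:
 *
 *	__mlock_posix_error_return(-EFAULT) == -ENOMEM
 *	__mlock_posix_error_return(-ENOMEM) == -EAGAIN
 */
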
/*
 * Prepare page for fast batched LRU putback via __putback_lru_fast()
 *
 * The fast path is available only for evictable pages with single mapping.
 * Then we can bypass the per-cpu pvec and get better performance.
 * When mapcount > 1 we need try_to_munlock() which can fail.
 * When !page_evictable(), we need the full redo logic of putback_lru_page to
 * avoid leaving evictable page in unevictable list.
 *
 * In case of success, @page is added to @pvec and @pgrescued is incremented
 * in case that the page was previously unevictable. @page is also unlocked.
 */
static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
		int *pgrescued)
{
	VM_BUG_ON(PageLRU(page));
	VM_BUG_ON(!PageLocked(page));

	if (page_mapcount(page) <= 1 && page_evictable(page)) {
		pagevec_add(pvec, page);
		if (TestClearPageUnevictable(page))
			(*pgrescued)++;
		unlock_page(page);
		return true;
	}

	return false;
}

/*
 * Putback multiple evictable pages to the LRU
 *
 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
 * the pages might have meanwhile become unevictable but that is OK.
 */
static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
{
	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
	/*
	 * __pagevec_lru_add() calls release_pages() so we don't call
	 * put_page() explicitly
	 */
	__pagevec_lru_add(pvec);
	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
}

/*
 * Munlock a batch of pages from the same zone
 *
 * The work is split to two main phases. First phase clears the Mlocked flag
 * and attempts to isolate the pages, all under a single zone lru lock.
 * The second phase finishes the munlock only for pages where isolation
 * succeeded.
 *
 * Note that the pagevec may be modified during the process.
 */
static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
	int i;
	int nr = pagevec_count(pvec);
	int delta_munlocked;
	struct pagevec pvec_putback;
	int pgrescued = 0;

	pagevec_init(&pvec_putback, 0);

	/* Phase 1: page isolation */
	spin_lock_irq(&zone->lru_lock);
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (TestClearPageMlocked(page)) {
			struct lruvec *lruvec;
			int lru;

			if (PageLRU(page)) {
				lruvec = mem_cgroup_page_lruvec(page, zone);
				lru = page_lru(page);
				/*
				 * We already have pin from follow_page_mask()
				 * so we can spare the get_page() here.
				 */
				ClearPageLRU(page);
				del_page_from_lru_list(page, lruvec, lru);
			} else {
				__munlock_isolation_failed(page);
				goto skip_munlock;
			}

		} else {
skip_munlock:
			/*
			 * We won't be munlocking this page in the next phase
			 * but we still need to release the follow_page_mask()
			 * pin. We cannot do it under lru_lock however. If it's
			 * the last pin, __page_cache_release would deadlock.
			 */
			pagevec_add(&pvec_putback, pvec->pages[i]);
			pvec->pages[i] = NULL;
		}
	}
	delta_munlocked = -nr + pagevec_count(&pvec_putback);
	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
	spin_unlock_irq(&zone->lru_lock);

	/* Now we can release pins of pages that we are not munlocking */
	pagevec_release(&pvec_putback);

	/* Phase 2: page munlock */
	for (i = 0; i < nr; i++) {
		struct page *page = pvec->pages[i];

		if (page) {
			lock_page(page);
			if (!__putback_lru_fast_prepare(page, &pvec_putback,
					&pgrescued)) {
				/*
				 * Slow path. We don't want to lose the last
				 * pin before unlock_page()
				 */
				get_page(page); /* for putback_lru_page() */
				__munlock_isolated_page(page);
				unlock_page(page);
				put_page(page); /* from follow_page_mask() */
			}
		}
	}

	/*
	 * Phase 3: page putback for pages that qualified for the fast path
	 * This will also call put_page() to return pin from follow_page_mask()
	 */
	if (pagevec_count(&pvec_putback))
		__putback_lru_fast(&pvec_putback, pgrescued);
}

/*
 * Fill up pagevec for __munlock_pagevec using pte walk
 *
 * The function expects that the struct page corresponding to @start address is
 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
 *
 * The rest of @pvec is filled by subsequent pages within the same pmd and same
 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
 * pages also get pinned.
 *
 * Returns the address of the next page that should be scanned. This equals
 * @start + PAGE_SIZE when no page could be added by the pte walk.
 */
static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
		struct vm_area_struct *vma, int zoneid, unsigned long start,
		unsigned long end)
{
	pte_t *pte;
	spinlock_t *ptl;

	/*
	 * Initialize pte walk starting at the already pinned page where we
	 * are sure that there is a pte, as it was pinned under the same
	 * mmap_sem write op.
	 */
	pte = get_locked_pte(vma->vm_mm, start, &ptl);
	/* Make sure we do not cross the page table boundary */
	end = pgd_addr_end(start, end);
	end = pud_addr_end(start, end);
	end = pmd_addr_end(start, end);

	/* The page next to the pinned page is the first we will try to get */
	start += PAGE_SIZE;
	while (start < end) {
		struct page *page = NULL;
		pte++;
		if (pte_present(*pte))
			page = vm_normal_page(vma, start, *pte);
		/*
		 * Break if page could not be obtained or the page's node+zone
		 * does not match
		 */
		if (!page || page_zone_id(page) != zoneid)
			break;

		get_page(page);
		/*
		 * Increase the address that will be returned *before* the
		 * eventual break due to pvec becoming full by adding the page
		 */
		start += PAGE_SIZE;
		if (pagevec_add(pvec, page) == 0)
			break;
	}
	pte_unmap_unlock(pte, ptl);
	return start;
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	vma->vm_flags &= ~VM_LOCKED;

	while (start < end) {
		struct page *page = NULL;
		unsigned int page_mask;
		unsigned long page_increm;
		struct pagevec pvec;
		struct zone *zone;
		int zoneid;

		pagevec_init(&pvec, 0);
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
				&page_mask);

		if (page && !IS_ERR(page)) {
			if (PageTransHuge(page)) {
				lock_page(page);
				/*
				 * Any THP page found by follow_page_mask() may
				 * have gotten split before reaching
				 * munlock_vma_page(), so we need to recompute
				 * the page_mask here.
				 */
				page_mask = munlock_vma_page(page);
				unlock_page(page);
				put_page(page); /* follow_page_mask() */
			} else {
				/*
				 * Non-huge pages are handled in batches via
				 * pagevec. The pin from follow_page_mask()
				 * prevents them from collapsing by THP.
				 */
				pagevec_add(&pvec, page);
				zone = page_zone(page);
				zoneid = page_zone_id(page);

				/*
				 * Try to fill the rest of pagevec using fast
				 * pte walk. This will also update start to
				 * the next page to process. Then munlock the
				 * pagevec.
				 */
				start = __munlock_pagevec_fill(&pvec, vma,
						zoneid, start, end);
				__munlock_pagevec(&pvec, zone);
				goto next;
			}
		}
		/* It's a bug to munlock in the middle of a THP page */
		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
		page_increm = 1 + page_mask;
		start += page_increm * PAGE_SIZE;
next:
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = !!(newflags & VM_LOCKED);

	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
		goto out;	/* don't set VM_LOCKED, don't count */

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock)
		vma->vm_flags = newflags;
	else
		munlock_vma_pages_range(vma, start, end);

out:
	*prev = vma;
	return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		vm_flags_t newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (on)
			newflags |= VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. __mlock_vma_pages_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			ret = __mlock_posix_error_return(ret);
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked += current->mm->locked_vm;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);

	up_write(&current->mm->mmap_sem);
	if (!error)
		error = __mm_populate(start, len, 0);
	return error;
}

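/*
 * Illustrative userspace usage (sketch, not kernel code): a process that
 * wants to keep a buffer resident locks it and checks errno, e.g.
 *
 *	if (mlock(buf, buf_len) != 0)
 *		perror("mlock");	(EPERM, ENOMEM, EAGAIN or EINVAL)
 *
 * buf/buf_len are hypothetical names; as seen above, the kernel page-aligns
 * the range before checking RLIMIT_MEMLOCK and locking it.
 */
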
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	down_write(&current->mm->mmap_sem);
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);

	return ret;
}

static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;

	if (flags & MCL_FUTURE)
		current->mm->def_flags |= VM_LOCKED;
	else
		current->mm->def_flags &= ~VM_LOCKED;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED;
		if (flags & MCL_CURRENT)
			newflags |= VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	if (flags & MCL_CURRENT)
		lru_add_drain_all();	/* flush pagevec */

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	down_write(&current->mm->mmap_sem);

	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);
out:
	return ret;
}

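/*
 * Illustrative userspace usage (sketch, not kernel code): real-time programs
 * commonly lock both current and future mappings up front, staying within
 * RLIMIT_MEMLOCK or running with CAP_IPC_LOCK:
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
 *		perror("mlockall");
 */
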
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
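
/*
 * Typical caller pattern (illustrative sketch): the shm code is expected to
 * pair these helpers around locking and unlocking a segment, roughly
 *
 *	if (user_shm_lock(size, user))
 *		... mark the segment locked ...
 *	user_shm_unlock(size, user);	(on unlock or destroy)
 *
 * so the locked pages are charged to the user_struct rather than to any
 * single mm, matching the lifetime note above.
 */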