/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_mutex
 *         anon_vma->mutex
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *               inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_wb_list_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * ->tasklist_lock
 *   anon_vma->mutex      (memory_failure, collect_procs_anon)
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
        struct anon_vma *anon_vma;

        anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
        if (anon_vma) {
                atomic_set(&anon_vma->refcount, 1);
                /*
                 * Initialise the anon_vma root to point to itself. If called
                 * from fork, the root will be reset to the parent's anon_vma.
                 */
                anon_vma->root = anon_vma;
        }

        return anon_vma;
}
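
/*
 * Note (editorial aside, derived from the code in this file): the initial
 * reference is owned by the allocating caller. It is dropped either via
 * put_anon_vma() when the anon_vma loses the install race in
 * anon_vma_prepare(), or by anon_vma_unlink() once the list of chains
 * attached to this anon_vma goes empty.
 */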

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
        VM_BUG_ON(atomic_read(&anon_vma->refcount));

        /*
         * Synchronize against page_lock_anon_vma() such that
         * we can safely hold the lock without the anon_vma getting
         * freed.
         *
         * Relies on the full mb implied by the atomic_dec_and_test() from
         * put_anon_vma() against the acquire barrier implied by
         * mutex_trylock() from page_lock_anon_vma(). This orders:
         *
         * page_lock_anon_vma()         VS      put_anon_vma()
         *   mutex_trylock()                      atomic_dec_and_test()
         *   LOCK                                 MB
         *   atomic_read()                        mutex_is_locked()
         *
         * LOCK should suffice since the actual taking of the lock must
         * happen _before_ what follows.
         */
        if (mutex_is_locked(&anon_vma->root->mutex)) {
                anon_vma_lock(anon_vma);
                anon_vma_unlock(anon_vma);
        }

        kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
{
        return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
        kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        struct anon_vma_chain *avc;

        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated;

                avc = anon_vma_chain_alloc();
                if (!avc)
                        goto out_enomem;

                anon_vma = find_mergeable_anon_vma(vma);
                allocated = NULL;
                if (!anon_vma) {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                                goto out_enomem_free_avc;
                        allocated = anon_vma;
                }

                anon_vma_lock(anon_vma);
                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        avc->anon_vma = anon_vma;
                        avc->vma = vma;
                        list_add(&avc->same_vma, &vma->anon_vma_chain);
                        list_add_tail(&avc->same_anon_vma, &anon_vma->head);
                        allocated = NULL;
                        avc = NULL;
                }
                spin_unlock(&mm->page_table_lock);
                anon_vma_unlock(anon_vma);

                if (unlikely(allocated))
                        put_anon_vma(allocated);
                if (unlikely(avc))
                        anon_vma_chain_free(avc);
        }
        return 0;

 out_enomem_free_avc:
        anon_vma_chain_free(avc);
 out_enomem:
        return -ENOMEM;
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
                                struct anon_vma_chain *avc,
                                struct anon_vma *anon_vma)
{
        avc->vma = vma;
        avc->anon_vma = anon_vma;
        list_add(&avc->same_vma, &vma->anon_vma_chain);

        anon_vma_lock(anon_vma);
        /*
         * It's critical to add new vmas to the tail of the anon_vma,
         * see comment in huge_memory.c:__split_huge_page().
         */
        list_add_tail(&avc->same_anon_vma, &anon_vma->head);
        anon_vma_unlock(anon_vma);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
        struct anon_vma_chain *avc, *pavc;

        list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
                avc = anon_vma_chain_alloc();
                if (!avc)
                        goto enomem_failure;
                anon_vma_chain_link(dst, avc, pavc->anon_vma);
        }
        return 0;

 enomem_failure:
        unlink_anon_vmas(dst);
        return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
        struct anon_vma_chain *avc;
        struct anon_vma *anon_vma;

        /* Don't bother if the parent process has no anon_vma here. */
        if (!pvma->anon_vma)
                return 0;

        /*
         * First, attach the new VMA to the parent VMA's anon_vmas,
         * so rmap can find non-COWed pages in child processes.
         */
        if (anon_vma_clone(vma, pvma))
                return -ENOMEM;

        /* Then add our own anon_vma. */
        anon_vma = anon_vma_alloc();
        if (!anon_vma)
                goto out_error;
        avc = anon_vma_chain_alloc();
        if (!avc)
                goto out_error_free_anon_vma;

        /*
         * The root anon_vma's spinlock is the lock actually used when we
         * lock any of the anon_vmas in this anon_vma tree.
         */
        anon_vma->root = pvma->anon_vma->root;
        /*
         * With refcounts, an anon_vma can stay around longer than the
         * process it belongs to. The root anon_vma needs to be pinned until
         * this anon_vma is freed, because the lock lives in the root.
         */
        get_anon_vma(anon_vma->root);
        /* Mark this anon_vma as the one where our new (COWed) pages go. */
        vma->anon_vma = anon_vma;
        anon_vma_chain_link(vma, avc, anon_vma);

        return 0;

 out_error_free_anon_vma:
        put_anon_vma(anon_vma);
 out_error:
        unlink_anon_vmas(vma);
        return -ENOMEM;
}

static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
{
        struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
        int empty;

        /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
        if (!anon_vma)
                return;

        anon_vma_lock(anon_vma);
        list_del(&anon_vma_chain->same_anon_vma);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        anon_vma_unlock(anon_vma);

        if (empty)
                put_anon_vma(anon_vma);
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc, *next;

        /*
         * Unlink each anon_vma chained to the VMA. This list is ordered
         * from newest to oldest, ensuring the root anon_vma gets freed last.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                anon_vma_unlink(avc);
                list_del(&avc->same_vma);
                anon_vma_chain_free(avc);
        }
}

static void anon_vma_ctor(void *data)
{
        struct anon_vma *anon_vma = data;

        mutex_init(&anon_vma->mutex);
        atomic_set(&anon_vma->refcount, 0);
        INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
        anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
        }

        /*
         * If this page is still mapped, then its anon_vma cannot have been
         * freed. But if it has been unmapped, we have no security against the
         * anon_vma structure being freed and reused (for another anon_vma:
         * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
         * above cannot corrupt).
         */
        if (!page_mapped(page)) {
                put_anon_vma(anon_vma);
                anon_vma = NULL;
        }
out:
        rcu_read_unlock();

        return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        struct anon_vma *root_anon_vma;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        root_anon_vma = ACCESS_ONCE(anon_vma->root);
        if (mutex_trylock(&root_anon_vma->mutex)) {
                /*
                 * If the page is still mapped, then this anon_vma is still
                 * its anon_vma, and holding the mutex ensures that it will
                 * not go away, see __put_anon_vma().
                 */
                if (!page_mapped(page)) {
                        mutex_unlock(&root_anon_vma->mutex);
                        anon_vma = NULL;
                }
                goto out;
        }

        /* trylock failed, we got to sleep */
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
        }

        if (!page_mapped(page)) {
                put_anon_vma(anon_vma);
                anon_vma = NULL;
                goto out;
        }

        /* we pinned the anon_vma, it's safe to sleep */
        rcu_read_unlock();
        anon_vma_lock(anon_vma);

        if (atomic_dec_and_test(&anon_vma->refcount)) {
                /*
                 * Oops, we held the last refcount, release the lock
                 * and bail -- can't simply use put_anon_vma() because
                 * we'll deadlock on the anon_vma_lock() recursion.
                 */
                anon_vma_unlock(anon_vma);
                __put_anon_vma(anon_vma);
                anon_vma = NULL;
        }

        return anon_vma;

out:
        rcu_read_unlock();
        return anon_vma;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
        anon_vma_unlock(anon_vma);
}

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by the @vma.
 */
inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        if (unlikely(is_vm_hugetlb_page(vma)))
                pgoff = page->index << huge_page_order(page_hstate(page));
        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within @vma mapping range */
                return -EFAULT;
        }
        return address;
}
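
/*
 * Worked example (an editorial illustration, not from the original source):
 * with 4K pages, a vma with vm_start = 0x400000 and vm_pgoff = 0x10 starts
 * at file offset 0x10000; the page with index 0x12 is then expected at
 * 0x400000 + ((0x12 - 0x10) << 12), i.e. 0x402000.
 */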

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        if (PageAnon(page)) {
                struct anon_vma *page__anon_vma = page_anon_vma(page);
                /*
                 * Note: swapoff's unuse_vma() is more efficient with this
                 * check, and needs it to match anon_vma when KSM is active.
                 */
                if (!vma->anon_vma || !page__anon_vma ||
                    vma->anon_vma->root != page__anon_vma->root)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
                    vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
        return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                            unsigned long address, spinlock_t **ptlp, int sync)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        if (unlikely(PageHuge(page))) {
                pte = huge_pte_offset(mm, address);
                ptl = &mm->page_table_lock;
                goto check;
        }

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_trans_huge(*pmd))
                return NULL;

        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!sync && !pte_present(*pte)) {
                pte_unmap(pte);
                return NULL;
        }

        ptl = pte_lockptr(mm, pmd);
check:
        spin_lock(ptl);
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;

        address = vma_address(page, vma);
        if (address == -EFAULT)         /* out of vma range */
                return 0;
        pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
        if (!pte)                       /* the page is not in this mm */
                return 0;
        pte_unmap_unlock(pte, ptl);

        return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                        unsigned long address, unsigned int *mapcount,
                        unsigned long *vm_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        int referenced = 0;

        if (unlikely(PageTransHuge(page))) {
                pmd_t *pmd;

                spin_lock(&mm->page_table_lock);
                /*
                 * rmap might return false positives; we must filter
                 * these out using page_check_address_pmd().
                 */
                pmd = page_check_address_pmd(page, mm, address,
                                             PAGE_CHECK_ADDRESS_PMD_FLAG);
                if (!pmd) {
                        spin_unlock(&mm->page_table_lock);
                        goto out;
                }

                if (vma->vm_flags & VM_LOCKED) {
                        spin_unlock(&mm->page_table_lock);
                        *mapcount = 0;  /* break early from loop */
                        *vm_flags |= VM_LOCKED;
                        goto out;
                }

                /* go ahead even if the pmd is pmd_trans_splitting() */
                if (pmdp_clear_flush_young_notify(vma, address, pmd))
                        referenced++;
                spin_unlock(&mm->page_table_lock);
        } else {
                pte_t *pte;
                spinlock_t *ptl;

                /*
                 * rmap might return false positives; we must filter
                 * these out using page_check_address().
                 */
                pte = page_check_address(page, mm, address, &ptl, 0);
                if (!pte)
                        goto out;

                if (vma->vm_flags & VM_LOCKED) {
                        pte_unmap_unlock(pte, ptl);
                        *mapcount = 0;  /* break early from loop */
                        *vm_flags |= VM_LOCKED;
                        goto out;
                }

                if (ptep_clear_flush_young_notify(vma, address, pte)) {
                        /*
                         * Don't treat a reference through a sequentially read
                         * mapping as such. If the page has been used in
                         * another mapping, we will catch it; if this other
                         * mapping is already gone, the unmap path will have
                         * set PG_referenced or activated the page.
                         */
                        if (likely(!VM_SequentialReadHint(vma)))
                                referenced++;
                }
                pte_unmap_unlock(pte, ptl);
        }

        /*
         * Pretend the page is referenced if the task has the
         * swap token and is in the middle of a page fault.
         */
        if (mm != current->mm && has_swap_token(mm) &&
                        rwsem_is_locked(&mm->mmap_sem))
                referenced++;

        (*mapcount)--;

        if (referenced)
                *vm_flags |= vma->vm_flags;
out:
        return referenced;
}

static int page_referenced_anon(struct page *page,
                                struct mem_cgroup *mem_cont,
                                unsigned long *vm_flags)
{
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct anon_vma_chain *avc;
        int referenced = 0;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return referenced;

        mapcount = page_mapcount(page);
        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
                 * cgroups
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
                referenced += page_referenced_one(page, vma, address,
                                                  &mapcount, vm_flags);
                if (!mapcount)
                        break;
        }

        page_unlock_anon_vma(anon_vma);
        return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect the vm_flags of the vmas which actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
                                struct mem_cgroup *mem_cont,
                                unsigned long *vm_flags)
{
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int referenced = 0;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_mutex.
         */
        BUG_ON(!PageLocked(page));

        mutex_lock(&mapping->i_mmap_mutex);

        /*
         * i_mmap_mutex does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                /*
                 * If we are reclaiming on behalf of a cgroup, skip
                 * counting on behalf of references from different
                 * cgroups
                 */
                if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
                        continue;
                referenced += page_referenced_one(page, vma, address,
                                                  &mapcount, vm_flags);
                if (!mapcount)
                        break;
        }

        mutex_unlock(&mapping->i_mmap_mutex);
        return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect the vm_flags of the vmas which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
                    int is_locked,
                    struct mem_cgroup *mem_cont,
                    unsigned long *vm_flags)
{
        int referenced = 0;
        int we_locked = 0;

        *vm_flags = 0;
        if (page_mapped(page) && page_rmapping(page)) {
                if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
                        we_locked = trylock_page(page);
                        if (!we_locked) {
                                referenced++;
                                goto out;
                        }
                }
                if (unlikely(PageKsm(page)))
                        referenced += page_referenced_ksm(page, mem_cont,
                                                          vm_flags);
                else if (PageAnon(page))
                        referenced += page_referenced_anon(page, mem_cont,
                                                           vm_flags);
                else if (page->mapping)
                        referenced += page_referenced_file(page, mem_cont,
                                                           vm_flags);
                if (we_locked)
                        unlock_page(page);
        }
out:
        if (page_test_and_clear_young(page_to_pfn(page)))
                referenced++;

        return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
                            unsigned long address)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;

        pte = page_check_address(page, mm, address, &ptl, 1);
        if (!pte)
                goto out;

        if (pte_dirty(*pte) || pte_write(*pte)) {
                pte_t entry;

                flush_cache_page(vma, address, pte_pfn(*pte));
                entry = ptep_clear_flush_notify(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
                ret = 1;
        }

        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = 0;

        BUG_ON(PageAnon(page));

        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED) {
                        unsigned long address = vma_address(page, vma);
                        if (address == -EFAULT)
                                continue;
                        ret += page_mkclean_one(page, vma, address);
                }
        }
        mutex_unlock(&mapping->i_mmap_mutex);
        return ret;
}

int page_mkclean(struct page *page)
{
        int ret = 0;

        BUG_ON(!PageLocked(page));

        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
                if (mapping) {
                        ret = page_mkclean_file(mapping, page);
                        if (page_test_and_clear_dirty(page_to_pfn(page), 1))
                                ret = 1;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!anon_vma);
        VM_BUG_ON(page->index != linear_page_index(vma, address));

        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
}
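
/*
 * Note on the cast above (editorial aside): anonymous pages reuse
 * page->mapping to hold the anon_vma pointer with the PAGE_MAPPING_ANON
 * low bit set; that tag is how PageAnon() distinguishes them from file
 * pages, whose mapping points at an address_space.
 */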

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);

        if (PageAnon(page))
                return;

        /*
         * If the page isn't exclusively mapped into this vma,
         * we must use the _oldest_ possible anon_vma for the
         * page mapping!
         */
        if (!exclusive)
                anon_vma = anon_vma->root;

        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
        page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * The page's anon-rmap details (mapping and index) are guaranteed to
         * be set up correctly at this point.
         *
         * We have exclusion against page_add_anon_rmap because the caller
         * always holds the page locked, except if called from page_dup_rmap,
         * in which case the page is already known to be setup.
         *
         * We have exclusion against page_add_new_anon_rmap because those pages
         * are initially only visible via the pagetables, and the pte is locked
         * over the call to page_add_new_anon_rmap.
         */
        BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
        BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
{
        int first = atomic_inc_and_test(&page->_mapcount);
        if (first) {
                if (!PageTransHuge(page))
                        __inc_zone_page_state(page, NR_ANON_PAGES);
                else
                        __inc_zone_page_state(page,
                                              NR_ANON_TRANSPARENT_HUGEPAGES);
        }
        if (unlikely(PageKsm(page)))
                return;

        VM_BUG_ON(!PageLocked(page));
        /* address might be in next vma when migration races vma_adjust */
        if (first)
                __page_set_anon_rmap(page, vma, address, exclusive);
        else
                __page_check_anon_rmap(page, vma, address);
}
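
/*
 * Note: _mapcount starts at -1 for an unmapped page, so the
 * atomic_inc_and_test() above is true exactly when this is the first
 * mapping of the page - which is what gates both the statistics update
 * and the anon rmap setup.
 */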

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        SetPageSwapBacked(page);
        atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
        if (!PageTransHuge(page))
                __inc_zone_page_state(page, NR_ANON_PAGES);
        else
                __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
        __page_set_anon_rmap(page, vma, address, 1);
        if (page_evictable(page, vma))
                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
        else
                add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
                mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
        }
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
        /* page still mapped by someone else? */
        if (!atomic_add_negative(-1, &page->_mapcount))
                return;

        /*
         * Now that the last pte has gone, s390 must transfer dirty
         * flag from storage key to struct page. We can usually skip
         * this if the page is anon, so about to be freed; but perhaps
         * not if it's in swapcache - there might be another pte slot
         * containing the swap entry, but page not yet written to swap.
         */
        if ((!PageAnon(page) || PageSwapCache(page)) &&
            page_test_and_clear_dirty(page_to_pfn(page), 1))
                set_page_dirty(page);
        /*
         * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
         * and not charged by memcg for now.
         */
        if (unlikely(PageHuge(page)))
                return;
        if (PageAnon(page)) {
                mem_cgroup_uncharge_page(page);
                if (!PageTransHuge(page))
                        __dec_zone_page_state(page, NR_ANON_PAGES);
                else
                        __dec_zone_page_state(page,
                                              NR_ANON_TRANSPARENT_HUGEPAGES);
        } else {
                __dec_zone_page_state(page, NR_FILE_MAPPED);
                mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
        }
        /*
         * It would be tidy to reset the PageAnon mapping here,
         * but that might overwrite a racing page_add_anon_rmap
         * which increments mapcount after us but sets mapping
         * before us: so leave the reset to free_hot_cold_page,
         * and remember that it's only reliable while mapped.
         * Leaving it set also helps swapoff to reinstate ptes
         * faster for those pages still in swapcache.
         */
}
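
/*
 * Note: atomic_add_negative(-1, ...) above is the mirror of the
 * inc-and-test in page_add_anon_rmap: it is true only when _mapcount
 * drops back to -1, i.e. when the last mapping of the page has just
 * gone away.
 */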

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                     unsigned long address, enum ttu_flags flags)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;

        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
                goto out;

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if (!(flags & TTU_IGNORE_MLOCK)) {
                if (vma->vm_flags & VM_LOCKED)
                        goto out_mlock;

                if (TTU_ACTION(flags) == TTU_MUNLOCK)
                        goto out_unmap;
        }
        if (!(flags & TTU_IGNORE_ACCESS)) {
                if (ptep_clear_flush_young_notify(vma, address, pte)) {
                        ret = SWAP_FAIL;
                        goto out_unmap;
                }
        }

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush_notify(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
                if (PageAnon(page))
                        dec_mm_counter(mm, MM_ANONPAGES);
                else
                        dec_mm_counter(mm, MM_FILEPAGES);
                set_pte_at(mm, address, pte,
                           swp_entry_to_pte(make_hwpoison_entry(page)));
        } else if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };

                if (PageSwapCache(page)) {
                        /*
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
                        if (swap_duplicate(entry) < 0) {
                                set_pte_at(mm, address, pte, pteval);
                                ret = SWAP_FAIL;
                                goto out_unmap;
                        }
                        if (list_empty(&mm->mmlist)) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&mm->mmlist))
                                        list_add(&mm->mmlist, &init_mm.mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        dec_mm_counter(mm, MM_ANONPAGES);
                        inc_mm_counter(mm, MM_SWAPENTS);
                } else if (PAGE_MIGRATION) {
                        /*
                         * Store the pfn of the page in a special migration
                         * pte. do_swap_page() will wait until the migration
                         * pte is removed and then restart fault handling.
                         */
                        BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
                        entry = make_migration_entry(page, pte_write(pteval));
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
        } else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
                /* Establish migration entry for a file page */
                swp_entry_t entry;
                entry = make_migration_entry(page, pte_write(pteval));
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
        } else
                dec_mm_counter(mm, MM_FILEPAGES);

        page_remove_rmap(page);
        page_cache_release(page);

out_unmap:
        pte_unmap_unlock(pte, ptl);
out:
        return ret;

out_mlock:
        pte_unmap_unlock(pte, ptl);

        /*
         * We need mmap_sem locking, otherwise the VM_LOCKED check gives
         * an unstable, racy result. Plus, we can't wait here because we
         * now hold anon_vma->mutex or mapping->i_mmap_mutex. If the
         * trylock failed, the page remains on the evictable lru, and
         * later vmscan can retry moving it to the unevictable lru if
         * the page is actually mlocked.
         */
        if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
                if (vma->vm_flags & VM_LOCKED) {
                        mlock_vma_page(page);
                        ret = SWAP_MLOCK;
                }
                up_read(&vma->vm_mm->mmap_sem);
        }
        return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking. If vma locked, mlock the pages in the cluster,
 * rather than unmapping them. If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))
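
/*
 * Illustrative numbers (an editorial aside, assuming 4K pages and a 2MB
 * PMD as on x86-64): CLUSTER_SIZE = min(32 * 4096, 2MB) = 128KB, i.e.
 * 32 ptes per scan, and CLUSTER_MASK rounds the cursor down to a 128KB
 * boundary.
 */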

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                struct vm_area_struct *vma, struct page *check_page)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;
        int ret = SWAP_AGAIN;
        int locked_vma = 0;

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)
                end = vma->vm_end;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return ret;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return ret;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return ret;

        /*
         * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
         * keep the sem while scanning the cluster for mlocking pages.
         */
        if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
                locked_vma = (vma->vm_flags & VM_LOCKED);
                if (!locked_vma)
                        up_read(&vma->vm_mm->mmap_sem); /* don't need it */
        }

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (; address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));

                if (locked_vma) {
                        mlock_vma_page(page);   /* no-op if already mlocked */
                        if (page == check_page)
                                ret = SWAP_MLOCK;
                        continue;       /* don't unmap */
                }

                if (ptep_clear_flush_young_notify(vma, address, pte))
                        continue;

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush_notify(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page);
                page_cache_release(page);
                dec_mm_counter(mm, MM_FILEPAGES);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
        if (locked_vma)
                up_read(&vma->vm_mm->mmap_sem);
        return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
        int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

        if (!maybe_stack)
                return false;

        if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
                                                VM_STACK_INCOMPLETE_SETUP)
                return true;

        return false;
}
1371
b291f000
NP
1372/**
1373 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
1374 * rmap method
1375 * @page: the page to unmap/unlock
8051be5e 1376 * @flags: action and flags
b291f000
NP
1377 *
1378 * Find all the mappings of a page using the mapping pointer and the vma chains
1379 * contained in the anon_vma struct it points to.
1380 *
1381 * This function is only called from try_to_unmap/try_to_munlock for
1382 * anonymous pages.
1383 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1384 * where the page was found will be held for write. So, we won't recheck
1385 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
1386 * 'LOCKED.
1387 */
14fa31b8 1388static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1da177e4
LT
1389{
1390 struct anon_vma *anon_vma;
5beb4930 1391 struct anon_vma_chain *avc;
1da177e4 1392 int ret = SWAP_AGAIN;
b291f000 1393
1da177e4
LT
1394 anon_vma = page_lock_anon_vma(page);
1395 if (!anon_vma)
1396 return ret;
1397
5beb4930
RR
1398 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1399 struct vm_area_struct *vma = avc->vma;
a8bef8ff
MG
1400 unsigned long address;
1401
1402 /*
1403 * During exec, a temporary VMA is setup and later moved.
1404 * The VMA is moved under the anon_vma lock but not the
1405 * page tables leading to a race where migration cannot
1406 * find the migration ptes. Rather than increasing the
1407 * locking requirements of exec(), migration skips
1408 * temporary VMAs until after exec() completes.
1409 */
1410 if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
1411 is_vma_temporary_stack(vma))
1412 continue;
1413
1414 address = vma_address(page, vma);
1cb1729b
HD
1415 if (address == -EFAULT)
1416 continue;
1417 ret = try_to_unmap_one(page, vma, address, flags);
53f79acb
HD
1418 if (ret != SWAP_AGAIN || !page_mapped(page))
1419 break;
1da177e4 1420 }
34bbd704
ON
1421
1422 page_unlock_anon_vma(anon_vma);
1da177e4
LT
1423 return ret;
1424}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write. So, we won't recheck
 * vm_flags for that VMA. That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                ret = try_to_unmap_one(page, vma, address, flags);
                if (ret != SWAP_AGAIN || !page_mapped(page))
                        goto out;
        }

        if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;

        /*
         * We don't bother to try to find the munlocked page in nonlinears.
         * It's costly. Instead, later, page reclaim logic may call
         * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
         */
        if (TTU_ACTION(flags) == TTU_MUNLOCK)
                goto out;

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        }

        if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
                ret = SWAP_FAIL;
                goto out;
        }

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway. Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
        cond_resched();

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                        cursor = (unsigned long) vma->vm_private_data;
                        while (cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                                if (try_to_unmap_cluster(cursor, &mapcount,
                                                vma, page) == SWAP_MLOCK)
                                        ret = SWAP_MLOCK;
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                                        goto out;
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
                cond_resched();
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas). Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
out:
        mutex_unlock(&mapping->i_mmap_mutex);
        return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN   - we missed a mapping, try again later
 * SWAP_FAIL    - the page is unswappable
 * SWAP_MLOCK   - page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
        int ret;

        BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));

        if (unlikely(PageKsm(page)))
                ret = try_to_unmap_ksm(page, flags);
        else if (PageAnon(page))
                ret = try_to_unmap_anon(page, flags);
        else
                ret = try_to_unmap_file(page, flags);
        if (ret != SWAP_MLOCK && !page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
}
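
/*
 * Usage sketch (an editorial illustration, not taken from this file):
 * the pageout path in vmscan holds the page lock and does roughly
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_FAIL:         goto activate_locked;
 *      case SWAP_AGAIN:        goto keep_locked;
 *      case SWAP_MLOCK:        goto cull_mlocked;
 *      case SWAP_SUCCESS:      break;
 *      }
 *
 * and only then proceeds to write the page out.
 */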

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code. Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN   - no vma is holding page mlocked, or,
 * SWAP_AGAIN   - page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL    - page cannot be located at present
 * SWAP_MLOCK   - page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
        VM_BUG_ON(!PageLocked(page) || PageLRU(page));

        if (unlikely(PageKsm(page)))
                return try_to_unmap_ksm(page, TTU_MUNLOCK);
        else if (PageAnon(page))
                return try_to_unmap_anon(page, TTU_MUNLOCK);
        else
                return try_to_unmap_file(page, TTU_MUNLOCK);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
        struct anon_vma *root = anon_vma->root;

        if (root != anon_vma && atomic_dec_and_test(&root->refcount))
                anon_vma_free(root);

        anon_vma_free(anon_vma);
}

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
                struct vm_area_struct *, unsigned long, void *), void *arg)
{
        struct anon_vma *anon_vma;
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;

        /*
         * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
         * because that depends on page_mapped(); but not all its usages
         * are holding mmap_sem. Users without mmap_sem are required to
         * take a reference count to prevent the anon_vma disappearing.
         */
        anon_vma = page_anon_vma(page);
        if (!anon_vma)
                return ret;
        anon_vma_lock(anon_vma);
        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                ret = rmap_one(page, vma, address, arg);
                if (ret != SWAP_AGAIN)
                        break;
        }
        anon_vma_unlock(anon_vma);
        return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
                struct vm_area_struct *, unsigned long, void *), void *arg)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;

        if (!mapping)
                return ret;
        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                ret = rmap_one(page, vma, address, arg);
                if (ret != SWAP_AGAIN)
                        break;
        }
        /*
         * No nonlinear handling: being always shared, nonlinear vmas
         * never contain migration ptes. Decide what to do about this
         * limitation to linear when we need rmap_walk() on nonlinear.
         */
        mutex_unlock(&mapping->i_mmap_mutex);
        return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
                struct vm_area_struct *, unsigned long, void *), void *arg)
{
        VM_BUG_ON(!PageLocked(page));

        if (unlikely(PageKsm(page)))
                return rmap_walk_ksm(page, rmap_one, arg);
        else if (PageAnon(page))
                return rmap_walk_anon(page, rmap_one, arg);
        else
                return rmap_walk_file(page, rmap_one, arg);
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address, int exclusive)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);

        if (PageAnon(page))
                return;
        if (!exclusive)
                anon_vma = anon_vma->root;

        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;
        page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
                            struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int first;

        BUG_ON(!PageLocked(page));
        BUG_ON(!anon_vma);
        /* address might be in next vma when migration races vma_adjust */
        first = atomic_inc_and_test(&page->_mapcount);
        if (first)
                __hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
{
        BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        atomic_set(&page->_mapcount, 0);
        __hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */