/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};
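
/*
 * Illustrative sketch, not part of the original file: a swap-cache page is
 * keyed by the raw swp_entry_t value rather than by a (mapping, index)
 * pair, which is why swapper_space needs no real inode behind it.  The
 * helper name example_swap_entry_of() is hypothetical; page_private(),
 * swp_type() and swp_offset() are the real accessors.
 */
static inline swp_entry_t example_swap_entry_of(struct page *page)
{
        swp_entry_t entry;

        /* For a PageSwapCache() page, ->private holds the swap entry value. */
        entry.val = page_private(page);
        /* swp_type(entry) picks the swap area, swp_offset(entry) the slot. */
        return entry;
}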

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages);
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(PageSwapCache(page));
        VM_BUG_ON(!PageSwapBacked(page));

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        spin_lock_irq(&swapper_space.tree_lock);
        error = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
        if (likely(!error)) {
                total_swapcache_pages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&swapper_space.tree_lock);

        if (unlikely(error)) {
                /*
                 * Only a context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache(), so add_to_swap_cache()
                 * never returns -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}
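
/*
 * Illustrative sketch, not part of the original file: the calling
 * convention assumed around add_to_swap_cache().  The caller is expected
 * to hold the page lock and to have claimed the entry's SWAP_HAS_CACHE
 * bit with swapcache_prepare() first.  example_cache_one() and its use of
 * GFP_ATOMIC are hypothetical; the other calls are the real APIs.
 */
static int example_cache_one(struct page *page, swp_entry_t entry)
{
        int err;

        err = swapcache_prepare(entry);         /* pin entry: SWAP_HAS_CACHE */
        if (err)
                return err;                     /* entry raced away or is busy */

        SetPageSwapBacked(page);
        err = add_to_swap_cache(page, entry, GFP_ATOMIC);
        if (err) {
                ClearPageSwapBacked(page);
                swapcache_free(entry, NULL);    /* drop SWAP_HAS_CACHE again */
        }
        return err;
}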

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageSwapCache(page));
        VM_BUG_ON(PageWriteback(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        entry = get_swap_page();
        if (!entry.val)
                return 0;

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator.  __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can
                 * safely clear the SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
                return 0;
        }
}
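
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * reclaim-style caller would use add_to_swap() on a dirty anonymous page
 * before it can be paged out.  example_prepare_anon_for_pageout() is a
 * hypothetical helper; add_to_swap() is the real entry point above.
 */
static int example_prepare_anon_for_pageout(struct page *page)
{
        /* The caller must hold the page lock, as add_to_swap() requires. */
        if (PageAnon(page) && !PageSwapCache(page)) {
                if (!add_to_swap(page))
                        return -ENOMEM; /* no swap slot, or cache insert failed */
        }
        /* After add_to_swap() succeeds the page is PageSwapCache() and dirty. */
        return 0;
}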

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swapcache_free(entry, page);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}
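
/*
 * Illustrative sketch, not part of the original file: the usual fault-path
 * pattern is to consult lookup_swap_cache() first and only fall back to
 * swapin_readahead() on a miss.  example_swapin_fault() and the use of
 * GFP_HIGHUSER_MOVABLE are hypothetical; the two lookups are the real
 * entry points declared in <linux/swap.h>.
 */
static struct page *example_swapin_fault(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;

        /* Fast path: the page may already sit in the swap cache. */
        page = lookup_swap_cache(entry);
        if (!page)
                /* Miss: allocate, start readaround and read this entry in. */
                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vma, addr);
        return page;
}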

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Call radix_tree_preload() while we can still wait.
                 */
                err = radix_tree_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {   /* seems racy */
                        radix_tree_preload_end();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(new_page);
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can
                 * safely clear the SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry, NULL);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
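
/*
 * Illustrative sketch, appended and not part of the original file: the
 * readahead window described above is an aligned block of
 * 1 << page_cluster slots, so with a page_cluster value of 3 a fault on
 * slot 21 would cover slots 16..23 (subject to trimming by
 * valid_swaphandles()).  example_readaround_start() is a hypothetical
 * helper showing only that alignment arithmetic.
 */
static inline unsigned long example_readaround_start(unsigned long offset)
{
        /* Round the faulting slot down to the start of its cluster window. */
        return offset & ~((1UL << page_cluster) - 1);
}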