// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

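/*
 * Return the workingset shadow entry stored in the swap cache for this
 * swap entry, or NULL if the slot holds a real page (or nothing).
 */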
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(folio_page(folio, i), entry.val + i);
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is an MADV_FREE page. The
	 * page's pte could have its dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

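/*
 * Drop any workingset shadow entries left in the swap cache for the swap
 * slots in [begin, end], walking each SWAP_ADDRESS_SPACE_PAGES-sized
 * swap address space that the range spans.
 */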
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	if (folio) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return folio;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return NULL;
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

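/*
 * Look up the swap entry in the swap cache; if it is not there, allocate a
 * page, charge it and add it to the swap cache, retrying if another task
 * races to instantiate the same entry. *new_page_allocated tells the caller
 * whether it must start the read into the (then locked) page itself.
 */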
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct folio *folio;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		folio = filemap_get_folio(swap_address_space(entry),
						swp_offset(entry));
		put_swap_device(si);
		if (folio)
			return folio_file_page(folio, swp_offset(entry));

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
		if (!folio)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared. Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	return &folio->page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_area_struct *vma,
				unsigned long addr, bool do_poll,
				struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

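/*
 * Work out how many pages to read ahead for this fault, based on how many
 * of the recently read-ahead pages were actually hit, clamped by the
 * page_cluster limit.
 */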
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

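/*
 * Allocate one struct address_space per SWAP_ADDRESS_SPACE_PAGES worth of
 * swap slots for this swap type, so the swap cache is split across several
 * smaller xarrays rather than a single huge one.
 */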
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

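/*
 * Work out the VMA-based readahead window around the faulting address and
 * remember the PTEs covering it, so swap_vma_readahead() can scan them
 * after the page table has been unmapped.
 */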
static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead blocks using either cluster-based
 * (i.e. physical disk based) or VMA-based (i.e. virtual addresses
 * around the faulting address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
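/*
 * Expose /sys/kernel/mm/swap/vma_ra_enabled for switching between VMA-based
 * and cluster-based swap readahead at runtime.
 */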
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif