// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
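
/*
 * Example: with 4K pages (PAGE_SHIFT == 12), SWAP_RA_WIN_SHIFT is 6, so
 * vma->swap_readahead_info packs the readahead hit count into bits 0-5,
 * the last readahead window size into bits 6-11, and the page-aligned
 * virtual address of the previous fault into the remaining high bits.
 */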

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

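	/*
	 * Replace each slot with the workingset shadow entry (or NULL) so
	 * that a later refault of this range can be detected and its
	 * refault distance measured.
	 */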
	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE pages: their
	 * ptes may have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not protected by a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/*
		 * Advance to the first slot of the next swap address space
		 * and keep searching until we pass end.
		 */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * - Marcelo
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them. They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	lru_add_drain();
	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
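		/*
		 * An entry flagged ENCODED_PAGE_BIT_NR_PAGES_NEXT is
		 * followed by an extra array element that encodes how many
		 * references to drop for this folio; consume it here so the
		 * loop skips over it.
		 */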
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}

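/*
 * VMA based readahead is used only when it is enabled and no rotational
 * swap device is in use: on spinning disks the physically contiguous
 * cluster readahead avoids the seeks that scattered virtual-address-based
 * reads would cause.
 */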
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Look up a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

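		/*
		 * A hit on a folio that readahead brought in feeds the
		 * per-vma (or global) hit counter, which in turn grows the
		 * readahead window used for subsequent faults.
		 */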
		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() if nothing was found.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache. Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swp_offset(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled, we
		 * have to handle the race between putting a swap entry
		 * into the swap cache and marking the swap slot as
		 * SWAP_HAS_CACHE. That's handled later in this function,
		 * or else swapoff would be aborted if we returned NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new folio to read into from swap. Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
						mpol, ilx, numa_node_id());
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not in the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared. Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
got_folio:
	put_swap_device(si);
	return folio;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, false, plug);
	return folio;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
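
/*
 * Example for __swapin_nr_pages(): with 3 recent readahead hits,
 * pages = 3 + 2 = 5, which is rounded up to the next power of two (8),
 * then clamped to max_pages and prevented from dropping below half of
 * the previous window.
 */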

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

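	/*
	 * page_cluster is the vm.page-cluster sysctl: the readahead window
	 * is at most 1 << page_cluster pages, and a value of 0 disables
	 * swap readahead entirely.
	 */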
	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, false, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated)) {
		zswap_folio_swapin(folio);
		swap_read_folio(folio, false, NULL);
	}
	return folio;
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

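	/*
	 * The swap cache for one swap device is split across several
	 * address_spaces, each covering SWAP_ADDRESS_SPACE_PAGES slots, so
	 * that the xarray lock does not become a single point of contention
	 * for the whole device.
	 */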
	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING	5

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
};

static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
	unsigned long start, end;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return;

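	/*
	 * Place the readahead window around the faulting pfn depending on
	 * where the previous fault was: entirely ahead of it for a forward
	 * sequential access (fpfn == pfn + 1), entirely behind it for a
	 * backward sequential access (pfn == fpfn + 1), and roughly
	 * centered on it otherwise.
	 */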
	if (fpfn == pfn + 1) {
		lpfn = fpfn;
		rpfn = fpfn + win;
	} else if (pfn == fpfn + 1) {
		lpfn = fpfn - win + 1;
		rpfn = fpfn + 1;
	} else {
		unsigned int left = (win - 1) / 2;

		lpfn = fpfn - left;
		rpfn = fpfn + win - left;
	}
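	/*
	 * Clamp the window to the VMA and to the range covered by this PMD,
	 * since the readahead loop only maps ptes from vmf->pmd.
	 */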
	start = max3(lpfn, PFN_DOWN(vma->vm_start),
		     PFN_DOWN(faddr & PMD_MASK));
	end = min3(rpfn, PFN_DOWN(vma->vm_end),
		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for @targ_entry, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	unsigned long addr;
	swp_entry_t entry;
	pgoff_t ilx;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
	ilx = targ_ilx - ra_info.offset;

	blk_start_plug(&plug);
	for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
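		/*
		 * Drop the pte mapping before __read_swap_cache_async(),
		 * which may sleep; the next loop iteration re-maps it if
		 * needed.
		 */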
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, false, &splug);
			if (i != ra_info.offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated)) {
		zswap_folio_swapin(folio);
		swap_read_folio(folio, false, NULL);
	}
	return folio;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. aligned blocks
 * of the swap area on disk) or vma-based (i.e. virtual addresses around
 * the faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	if (!folio)
		return NULL;
	return folio_file_page(folio, swp_offset(entry));
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif