// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

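/*
 * Per-VMA readahead state is packed into a single atomic_long
 * (vma->swap_readahead_info): the page-aligned fault address in the
 * PAGE_MASK bits, the readahead window above the hit count, and the
 * recent hit count in the low SWAP_RA_WIN_SHIFT bits, as encoded and
 * decoded by the SWAP_RA_* macros below.
 */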
#define SWAP_RA_ORDER_CEILING	5

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

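/*
 * When a folio is evicted from the swap cache, workingset eviction
 * information may be left behind in its slot as an XArray value
 * ("shadow") entry; it is consumed later by workingset_refault().
 */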
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

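	/*
	 * Store the folio in all nr consecutive slots it covers.  If the
	 * XArray needs a new node, drop the lock, let xas_nomem() allocate
	 * one with the caller's gfp mask, and retry.
	 */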
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE page. The
	 * page's pte could have its dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not done under a lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

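	/*
	 * The swap cache is split into one address_space per
	 * SWAP_ADDRESS_SPACE_PAGES chunk of the swap device, so walk the
	 * range chunk by chunk, clearing any shadow (value) entries found
	 * in each chunk's XArray.
	 */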
	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, index);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	lru_add_drain();
	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
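		/*
		 * A batched entry may be followed by an extra array slot
		 * that encodes how many references to drop for this folio.
		 */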
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
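		/*
		 * A hit on a readahead folio feeds back into the per-VMA
		 * (or global) hit count, which is used to grow or shrink
		 * the next readahead window.
		 */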
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() if no folio was found.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

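	/*
	 * Race to claim the swap entry for the swap cache: whoever gets
	 * SWAP_HAS_CACHE via swapcache_prepare() adds its folio; -EEXIST
	 * means another task is doing the same, so loop until its folio
	 * shows up in the cache (or the entry is freed).
	 */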
	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swap_cache_index(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new folio to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
got_folio:
	put_swap_device(si);
	return folio;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, plug);
	return folio;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

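	/*
	 * Example: with 4 recent hits, pages = 4 + 2 = 6, rounded up to 8,
	 * then clamped to max_pages (8 when page_cluster is the default 3).
	 */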
	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

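	/*
	 * swapin_nr_pages() returns 1 or a power-of-two window, so
	 * (window - 1) can be used as a mask to align the readahead
	 * cluster around the target offset.
	 */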
	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated)) {
		zswap_folio_swapin(folio);
		swap_read_folio(folio, NULL);
	}
	return folio;
}

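/*
 * The swap cache for each swap device is split into one address_space per
 * SWAP_ADDRESS_SPACE_PAGES worth of slots, spreading xarray lock
 * contention across multiple locks.
 */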
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
			   unsigned long *end)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, prev_faddr, left, right;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
	if (max_win == 1)
		return 1;

	faddr = vmf->address;
	ra_val = GET_SWAP_RA_VAL(vma);
	prev_faddr = SWAP_RA_ADDR(ra_val);
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
				max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return 1;

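	/*
	 * Place the window according to the scan direction: ahead of the
	 * fault for a forward scan, behind it for a backward scan, and
	 * centred on it otherwise, then clip it to the VMA and the PMD
	 * containing the fault address.
	 */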
	if (faddr == prev_faddr + PAGE_SIZE)
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	right = left + (win << PAGE_SHIFT);
	if ((long)left < 0)
		left = 0;
	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

	return win;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	int win;
	unsigned long start, end, addr;
	swp_entry_t entry;
	pgoff_t ilx;
	bool page_allocated;

	win = swap_vma_ra_win(vmf, &start, &end);
	if (win == 1)
		goto skip;

	ilx = targ_ilx - PFN_DOWN(vmf->address - start);

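	/*
	 * Walk the PTEs covering the window without taking the page table
	 * lock; ptep_get_lockless() gives a snapshot that is good enough
	 * for an opportunistic readahead decision.
	 */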
	blk_start_plug(&plug);
	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (addr != vmf->address) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated)) {
		zswap_folio_swapin(folio);
		swap_read_folio(folio, NULL);
	}
	return folio;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead blocks either cluster-based (ie, physical
 * disk offset based) or vma-based (ie, virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	if (!folio)
		return NULL;
	return folio_file_page(folio, swp_offset(entry));
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

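/*
 * Register the "swap" kobject under mm_kobj so the vma_ra_enabled knob
 * is exposed at /sys/kernel/mm/swap/vma_ra_enabled.
 */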
static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif