// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_ORDER_CEILING   5

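/*
 * Per-VMA readahead state is packed into a single unsigned long
 * (vma->swap_readahead_info): the page-aligned fault address in the
 * high bits, the readahead window in the middle bits and the recent
 * hit count in the low bits, using the masks and shifts below.
 */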
#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
        printk("Total swap = %lukB\n", K(total_swap_pages));
}

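/*
 * Return the shadow (workingset) entry stored at this swap entry's slot
 * in the swap cache, or NULL if the slot is empty or holds a folio.
 */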
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swap_cache_index(entry);
        void *shadow;

        shadow = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(shadow))
                return shadow;
        return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and 'swap' instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swap_cache_index(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);
        folio->swap = entry;

        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        if (shadowp) {
                                old = xas_load(&xas);
                                if (xa_is_value(old))
                                        *shadowp = old;
                        }
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        folio_clear_swapcache(folio);
        folio_ref_sub(folio, nr);
        return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i;
        long nr = folio_nr_pages(folio);
        pgoff_t idx = swap_cache_index(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
                xas_next(&xas);
        }
        folio->swap.val = 0;
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
        swp_entry_t entry = folio->swap;
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_folio(folio, entry);
        folio_ref_sub(folio, folio_nr_pages(folio));
}

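/*
 * Erase the shadow (workingset) entries for swap offsets [begin, end]
 * of the given swap type, walking one swap address space at a time.
 */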
void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                swp_entry_t entry = swp_entry(type, curr);
                unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, index);

                xas_set_update(&xas, workingset_update_node);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                }
                xa_unlock_irq(&address_space->i_pages);

                /* search the next swapcache until we meet end */
                curr = ALIGN((curr + 1), SWAP_ADDRESS_SPACE_PAGES);
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
void free_swap_cache(struct folio *folio)
{
        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
                folio_unlock(folio);
        }
}

/*
 * Freeing a folio and also freeing any swap cache associated with
 * this folio if it is the last user.
 */
void free_folio_and_swap_cache(struct folio *folio)
{
        free_swap_cache(folio);
        if (!is_huge_zero_folio(folio))
                folio_put(folio);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
        struct folio_batch folios;
        unsigned int refs[PAGEVEC_SIZE];

        folio_batch_init(&folios);
        for (int i = 0; i < nr; i++) {
                struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

                free_swap_cache(folio);
                refs[folios.nr] = 1;
                if (unlikely(encoded_page_flags(pages[i]) &
                             ENCODED_PAGE_BIT_NR_PAGES_NEXT))
                        refs[folios.nr] = encoded_nr_pages(pages[++i]);

                if (folio_batch_add(&folios, folio) == 0)
                        folios_put_refs(&folios, refs);
        }
        if (folios.nr)
                folios_put_refs(&folios, refs);
}

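/*
 * VMA-based readahead is used unless it has been disabled via sysfs or
 * a rotating swap device is in use (nr_rotate_swap is non-zero).
 */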
static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *folio;

        folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
        if (!IS_ERR(folio)) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(folio_test_large(folio)))
                        return folio;

                readahead = folio_test_clear_readahead(folio);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        } else {
                folio = NULL;
        }

        return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() if none was found.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct folio *folio = filemap_get_entry(mapping, index);

        if (!folio)
                return ERR_PTR(-ENOENT);
        if (!xa_is_value(folio))
                return folio;
        if (!shmem_mapping(mapping))
                return ERR_PTR(-ENOENT);

        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
                return ERR_PTR(-ENOENT);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return ERR_PTR(-ENOENT);
        index = swap_cache_index(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
        return folio;
}

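/*
 * Look up @entry in the swap cache; if it is not there, allocate a new
 * folio, claim the entry with SWAP_HAS_CACHE, and insert the folio into
 * the swap cache and LRU.  *new_page_allocated tells the caller whether
 * it must start the I/O on the returned (locked) folio itself.
 */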
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
                bool skip_if_exists)
{
        struct swap_info_struct *si = swp_swap_info(entry);
        struct folio *folio;
        struct folio *new_folio = NULL;
        struct folio *result = NULL;
        void *shadow = NULL;

        *new_page_allocated = false;
        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after swap_cache_get_folio() failed, re-calling
                 * that would confuse statistics.
                 */
                folio = filemap_get_folio(swap_address_space(entry),
                                          swap_cache_index(entry));
                if (!IS_ERR(folio))
                        goto got_folio;

                /*
                 * Just skip read ahead for unused swap slot.
                 */
                if (!swap_entry_swapped(si, entry))
                        goto put_and_return;

                /*
                 * Get a new folio to read into from swap.  Allocate it now
                 * if new_folio does not exist yet, before marking swap_map
                 * SWAP_HAS_CACHE, since -EEXIST will cause any racers to
                 * loop around until we add it to the cache.
                 */
                if (!new_folio) {
                        new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
                        if (!new_folio)
                                goto put_and_return;
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry, 1);
                if (!err)
                        break;
                else if (err != -EEXIST)
                        goto put_and_return;

                /*
                 * Protect against a recursive call to __read_swap_cache_async()
                 * on the same entry waiting forever here because SWAP_HAS_CACHE
                 * is set but the folio is not in the swap cache yet. This can
                 * happen today if mem_cgroup_swapin_charge_folio() below
                 * triggers reclaim through zswap, which may call
                 * __read_swap_cache_async() in the writeback path.
                 */
                if (skip_if_exists)
                        goto put_and_return;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its folio to swap cache.
                 */
                schedule_timeout_uninterruptible(1);
        }

        /*
         * The swap entry is ours to swap in. Prepare the new folio.
         */
        __folio_set_locked(new_folio);
        __folio_set_swapbacked(new_folio);

        if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        memcg1_swapin(entry, 1);

        if (shadow)
                workingset_refault(new_folio, shadow);

        /* Caller will initiate read into locked new_folio */
        folio_add_lru(new_folio);
        *new_page_allocated = true;
        folio = new_folio;
got_folio:
        result = folio;
        goto put_and_return;

fail_unlock:
        put_swap_folio(new_folio, entry);
        folio_unlock(new_folio);
put_and_return:
        if (!(*new_page_allocated) && new_folio)
                folio_put(new_folio);
        return result;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr,
                struct swap_iocb **plug)
{
        struct swap_info_struct *si;
        bool page_allocated;
        struct mempolicy *mpol;
        pgoff_t ilx;
        struct folio *folio;

        si = get_swap_device(entry);
        if (!si)
                return NULL;

        mpol = get_vma_policy(vma, addr, 0, &ilx);
        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);
        mpol_cond_put(mpol);

        if (page_allocated)
                swap_read_folio(folio, plug);

        put_swap_device(si);
        return folio;
}

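/*
 * Compute the next readahead window from the number of recent readahead
 * hits: grow roughly with the hit count (rounded up to a power of two),
 * clamp to max_pages, and never shrink below half of the previous window.
 */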
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

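/*
 * Global (non-VMA) readahead sizing: consume the accumulated hit count
 * and size the window against 1 << page_cluster, remembering the last
 * offset and window across calls.
 */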
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                struct mempolicy *mpol, pgoff_t ilx)
{
        struct folio *folio;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        bool page_allocated;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                folio = __read_swap_cache_async(
                                swp_entry(swp_type(entry), offset),
                                gfp_mask, mpol, ilx, &page_allocated, false);
                if (!folio)
                        continue;
                if (page_allocated) {
                        swap_read_folio(folio, &splug);
                        if (offset != entry_offset) {
                                folio_set_readahead(folio);
                                count_vm_event(SWAP_RA);
                        }
                }
                folio_put(folio);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        /* The page was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                        &page_allocated, false);
        if (unlikely(page_allocated))
                swap_read_folio(folio, NULL);
        return folio;
}

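/*
 * Set up the swap cache for one swap device: one struct address_space
 * per SWAP_ADDRESS_SPACE_PAGES chunk of the device, so the cache is
 * split across several xarrays rather than a single one.
 */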
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

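/*
 * Tear down the per-device swap address spaces again; every one of them
 * is expected to be empty by the time this is called.
 */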
void exit_swap_address_space(unsigned int type)
{
        int i;
        struct address_space *spaces = swapper_spaces[type];

        for (i = 0; i < nr_swapper_spaces[type]; i++)
                VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
        kvfree(spaces);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

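/*
 * Work out the VMA readahead window [*start, *end) around the faulting
 * address, clamped to the VMA and to the PMD that contains the fault,
 * and return the window size in pages (1 means no readahead).
 */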
static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
                           unsigned long *end)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        unsigned long faddr, prev_faddr, left, right;
        unsigned int max_win, hits, prev_win, win;

        max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
        if (max_win == 1)
                return 1;

        faddr = vmf->address;
        ra_val = GET_SWAP_RA_VAL(vma);
        prev_faddr = SWAP_RA_ADDR(ra_val);
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
                                max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
        if (win == 1)
                return 1;

        if (faddr == prev_faddr + PAGE_SIZE)
                left = faddr;
        else if (prev_faddr == faddr + PAGE_SIZE)
                left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
        else
                left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
        right = left + (win << PAGE_SHIFT);
        if ((long)left < 0)
                left = 0;
        *start = max3(left, vma->vm_start, faddr & PMD_MASK);
        *end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

        return win;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
                struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        struct folio *folio;
        pte_t *pte = NULL, pentry;
        int win;
        unsigned long start, end, addr;
        swp_entry_t entry;
        pgoff_t ilx;
        bool page_allocated;

        win = swap_vma_ra_win(vmf, &start, &end);
        if (win == 1)
                goto skip;

        ilx = targ_ilx - PFN_DOWN(vmf->address - start);

        blk_start_plug(&plug);
        for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
                if (!pte++) {
                        pte = pte_offset_map(vmf->pmd, addr);
                        if (!pte)
                                break;
                }
                pentry = ptep_get_lockless(pte);
                if (!is_swap_pte(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                pte_unmap(pte);
                pte = NULL;
                folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                                &page_allocated, false);
                if (!folio)
                        continue;
                if (page_allocated) {
                        swap_read_folio(folio, &splug);
                        if (addr != vmf->address) {
                                folio_set_readahead(folio);
                                count_vm_event(SWAP_RA);
                        }
                }
                folio_put(folio);
        }
        if (pte)
                pte_unmap(pte);
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();
skip:
        /* The folio was likely read above, so no need for plugging here */
        folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
                                        &page_allocated, false);
        if (unlikely(page_allocated))
                swap_read_folio(folio, NULL);
        return folio;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical,
 * disk-based) or vma-based (i.e. virtual addresses around the fault
 * address).
 */
struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct mempolicy *mpol;
        pgoff_t ilx;
        struct folio *folio;

        mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
        folio = swap_use_vma_readahead() ?
                swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
                swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
        mpol_cond_put(mpol);

        return folio;
}

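/*
 * sysfs interface: vma_ra_enabled is published via the "swap" kobject
 * created under mm_kobj (i.e. /sys/kernel/mm/swap/vma_ra_enabled) and
 * toggles enable_vma_readahead.
 */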
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        ssize_t ret;

        ret = kstrtobool(buf, &enable_vma_readahead);
        if (ret)
                return ret;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif