// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/swap_state.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 *
 * Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

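/*
 * The per-VMA readahead state is packed into a single unsigned long
 * (vma->swap_readahead_info).  For example, with 4KB pages
 * (PAGE_SHIFT == 12, so SWAP_RA_WIN_SHIFT == 6):
 *
 *	bits [5:0]   readahead hit count (SWAP_RA_HITS_MASK == 0x3f)
 *	bits [11:6]  readahead window size
 *	bits [63:12] page-aligned address of the last readahead
 *
 * so SWAP_RA_VAL(addr, win, hits) and the SWAP_RA_* accessors above
 * round-trip all three fields without any extra storage.
 */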
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

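/*
 * Return the workingset shadow entry stored at @entry's slot in the
 * swap cache, or NULL if the slot is empty or holds a real folio.
 */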
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is an MADV_FREE folio. Its
	 * pte could have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because the dirty bit and the
	 * SwapBacked flag are not cleared under a common lock. For such
	 * a folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

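/*
 * Drop any workingset shadow entries left in the swap cache for swap
 * offsets [begin, end] of swap device @type, so that freed swap slots
 * do not leave stale shadows pinning XArray nodes.  One swap device is
 * covered by multiple address_spaces, hence the outer loop that steps
 * to the next SWAP_ADDRESS_SPACE_PAGES-aligned boundary each pass.
 */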
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	lru_add_drain();
	for (int i = 0; i < nr; i++)
		free_swap_cache(encoded_page_ptr(pages[i]));
	release_pages(pages, nr);
}

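/*
 * VMA-based readahead is used unless it has been disabled via sysfs or
 * a rotational swap device is in use; on rotating media, the physical
 * cluster readahead below is preferred since it avoids seek cost (see
 * the comment above swap_cluster_readahead()).
 */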
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() if no folio was found.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swp_offset(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

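/*
 * __read_swap_cache_async() - look up or allocate the swap cache page
 * for @entry, claiming SWAP_HAS_CACHE when a new page is allocated.
 *
 * Returns the page with its refcount raised, or NULL if allocation
 * failed or the entry is no longer in use.  If a new page was
 * allocated, it is returned locked and not yet uptodate, already added
 * to the swap cache and LRU, and *new_page_allocated is set true: the
 * caller must initiate the actual read.  With skip_if_exists, give up
 * instead of waiting when another task holds SWAP_HAS_CACHE but has
 * not yet inserted its folio (used to avoid recursion deadlocks in the
 * zswap writeback path, see below).
 */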
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx,
		bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					swp_offset(entry));
		if (!IS_ERR(folio)) {
			page = folio_file_page(folio, swp_offset(entry));
			goto got_page;
		}

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled, we
		 * have to handle the race between putting the swap entry
		 * in the swap cache and marking the swap slot as
		 * SWAP_HAS_CACHE.  That is handled further down; otherwise
		 * swapoff would be aborted if we returned NULL here.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto fail_put_swap;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
						mpol, ilx, numa_node_id());
		if (!folio)
			goto fail_put_swap;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		folio_put(folio);
		if (err != -EEXIST)
			goto fail_put_swap;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not in the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto fail_put_swap;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(folio, shadow);

	/* Caller will initiate read into locked folio */
	folio_add_lru(folio);
	*new_page_allocated = true;
	page = &folio->page;
got_page:
	put_swap_device(si);
	return page;

fail_unlock:
	put_swap_folio(folio, entry);
	folio_unlock(folio);
	folio_put(folio);
fail_put_swap:
	put_swap_device(si);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma,
		unsigned long addr, struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct page *page;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_readpage(page, false, plug);
	return page;
}

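/*
 * Compute the readahead window from the recent hit count.  The window
 * is hits + 2 rounded up to a power of two, clamped to max_pages and
 * not allowed to fall below half of the previous window.  For example,
 * hits == 5 gives 7, which rounds up to an 8 page window (subject to
 * those clamps).  With no hits at all, read a single page unless the
 * fault is adjacent to the previous one.
 */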
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

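/*
 * Global (non-VMA) readahead window: consume the hit counter that
 * swap_cache_get_folio() accumulates, remember the last faulting
 * offset so purely sequential faults still get some readahead, and
 * cap the result at 2^page_cluster pages.
 */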
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
				       &page_allocated, false);
	if (unlikely(page_allocated))
		swap_readpage(page, false, NULL);
	zswap_page_swapin(page);
	return page;
}

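/*
 * Set up the swap cache for swap device @type: one address_space per
 * SWAP_ADDRESS_SPACE_PAGES worth of swap slots, so the XArray lock of
 * a single mapping is not a point of contention for the whole device.
 */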
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING	5

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
};

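/*
 * Work out the VMA-based readahead window for this fault: how many
 * PTEs around vmf->address to examine (nr_pte) and where the faulting
 * page sits within that range (offset).  The window size comes from
 * __swapin_nr_pages() fed with the per-VMA hit count, and the range is
 * clamped to the VMA and to the PMD containing the fault so a single
 * page table mapping suffices in swap_vma_readahead().
 */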
static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
	unsigned long start, end;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return;

	if (fpfn == pfn + 1) {
		lpfn = fpfn;
		rpfn = fpfn + win;
	} else if (pfn == fpfn + 1) {
		lpfn = fpfn - win + 1;
		rpfn = fpfn + 1;
	} else {
		unsigned int left = (win - 1) / 2;

		lpfn = fpfn - left;
		rpfn = fpfn + win - left;
	}
	start = max3(lpfn, PFN_DOWN(vma->vm_start),
		     PFN_DOWN(faddr & PMD_MASK));
	end = min3(rpfn, PFN_DOWN(vma->vm_end),
		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx,
		struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct page *page;
	pte_t *pte = NULL, pentry;
	unsigned long addr;
	swp_entry_t entry;
	pgoff_t ilx;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
	ilx = targ_ilx - ra_info.offset;

	blk_start_plug(&plug);
	for (i = 0; i < ra_info.nr_pte; i++, ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		page = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					       &page_allocated, false);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	page = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
				       &page_allocated, false);
	if (unlikely(page_allocated))
		swap_readpage(page, false, NULL);
	zswap_page_swapin(page);
	return page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It is the main entry point for swap readahead.  Depending on the
 * configuration, it reads ahead either by cluster (i.e. physical,
 * disk-based) or by VMA (i.e. virtual addresses around the faulting
 * address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			      struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct page *page;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	page = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);
	return page;
}

#ifdef CONFIG_SYSFS
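/*
 * The "vma_ra_enabled" attribute below toggles enable_vma_readahead at
 * runtime; it lives under the "swap" kobject that swap_init_sysfs()
 * creates beneath mm_kobj (i.e. under /sys/kernel/mm/swap/).
 */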
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif