// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

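/*
 * Complete a swap write.  Shared by the synchronous path, which calls it
 * directly after submit_bio_wait(), and the asynchronous path, which goes
 * through end_swap_bio_write() and also drops the bio reference.
 */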
static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

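/*
 * Complete a swap read.  The folio is marked uptodate only on success;
 * either way it is unlocked, which is what readers of the swapcache
 * folio wait on.
 */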
static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

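/*
 * Set up the swap extents for a swap file by probing it with bmap(),
 * one PAGE_SIZE chunk at a time.  Only runs of blocks that are
 * PAGE_SIZE-long and PAGE_SIZE-aligned on disk become extents; a hole
 * in the file fails activation with -EINVAL.  Returns the number of
 * extents added, and sets *span to the number of page-sized blocks
 * between the lowest and highest blocks used, inclusive.
 */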
int generic_swapfile_activate(struct swap_info_struct *sis,
			      struct file *swap_file,
			      sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
	       page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
		     block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

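/*
 * Return true if every word of every page in the folio is zero.  Pages
 * are mapped one at a time, since kmap_local_folio() can only map a
 * single page per call.
 */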
static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check the last word first, in case the page is zero-filled
		 * at the start and has non-zero data at the end, which is
		 * common in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}

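/*
 * Record in the device's zeromap that every swap entry covered by the
 * folio is zero-filled, so that swap-in can synthesize the zero pages
 * without touching the backing device, and account the zero swapouts.
 */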
static void swap_zeromap_folio_set(struct folio *folio)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio);
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	int nr_pages = folio_nr_pages(folio);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}

	count_vm_events(SWPOUT_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPOUT_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}
}

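/*
 * Clear the zeromap bits for every swap entry covered by the folio,
 * which now contains data that must be written out for real.
 */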
static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writeout(struct folio *folio, struct writeback_control *wbc)
{
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in the zeromap are protected by the locked swapcache
	 * folio, and atomic set_bit()/clear_bit() updates prevent
	 * read-modify-write corruption when entries that share a zeromap
	 * word are updated concurrently.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		folio_unlock(folio);
		return 0;
	} else {
		/*
		 * Clear bits this folio occupies in the zeromap to prevent
		 * zero data being read in from any previous zero writes that
		 * occupied the same swap entries.
		 */
		swap_zeromap_folio_clear(folio);
	}
	if (zswap_store(folio)) {
		count_mthp_stat(folio_order(folio), MTHP_STAT_ZSWPOUT);
		folio_unlock(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
	count_memcg_folio_events(folio, PSWPOUT, folio_nr_pages(folio));
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

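/*
 * A swap_iocb gathers up to SWAP_CLUSTER_MAX folios of swap IO to a
 * swap file into a single ->swap_rw() call.  Batches are built up under
 * a plug (see swap_writepage_fs() and swap_read_folio_fs()) and
 * submitted when the bvec array fills up or the plug is released.
 */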
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

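/*
 * Lazily create the shared sio mempool.  Concurrent callers may each
 * allocate a pool; the cmpxchg() picks one winner and the loser's pool
 * is destroyed, so this is safe to call concurrently.
 */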
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

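/*
 * Completion for a batched swap write.  A short or failed write
 * redirties every page in the batch so no data is lost; writeback is
 * ended on all pages either way.
 */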
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a temporary
		 * failure if the system has limited memory for allocating
		 * transmit buffers.  Mark the pages dirty, skip
		 * folio_rotate_reclaimable() by clearing PG_reclaim, and
		 * rate-limit the error messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

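/*
 * Write a folio to a swap file via ->swap_rw().  Writes to consecutive
 * positions in the same file are merged into the plugged swap_iocb; a
 * folio that does not extend the current batch forces it out first.
 */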
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

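/*
 * Synchronous block-device write, used for SWP_SYNCHRONOUS_IO devices.
 * The bio lives on the stack and the call returns only after the write
 * has completed.
 */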
static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

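/*
 * Asynchronous block-device write.  The bio is freed by
 * end_swap_bio_write() once the write completes.
 */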
static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}

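/*
 * Submit the actual swap IO for a folio: through ->swap_rw() for
 * SWP_FS_OPS swapfiles, otherwise straight to the block device,
 * synchronously for SWP_SYNCHRONOUS_IO devices and asynchronously
 * for everything else.
 */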
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS or SWP_SYNCHRONOUS_IO,
	 * so the data_race() annotations below are safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}

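/*
 * Submit a plugged batch of swap writes.  If ->swap_rw() completes
 * synchronously, call the completion handler ourselves.
 */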
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

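/*
 * Completion for a batched swap read.  Only a full-length read marks
 * the folios uptodate; after a short or failed read they are merely
 * unlocked, so readers see an IO error.
 */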
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
			count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

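/*
 * Try to satisfy a swap-in from the zeromap: if every entry covered by
 * the folio is known zero-filled, fill the folio with zeroes instead of
 * doing IO.  Returns true if the read was handled (or failed) here.
 */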
static bool swap_read_folio_zeromap(struct folio *folio)
{
	int nr_pages = folio_nr_pages(folio);
	struct obj_cgroup *objcg;
	bool is_zeromap;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled.  Return true without marking the folio uptodate
	 * so that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(swap_zeromap_batch(folio->swap, nr_pages,
			&is_zeromap) != nr_pages))
		return true;

	if (!is_zeromap)
		return false;

	objcg = get_obj_cgroup_from_folio(folio);
	count_vm_events(SWPIN_ZERO, nr_pages);
	if (objcg) {
		count_objcg_events(objcg, SWPIN_ZERO, nr_pages);
		obj_cgroup_put(objcg);
	}

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}

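/*
 * Read a folio from a swap file via ->swap_rw(), batching reads of
 * consecutive positions under the caller's plug just like the write
 * side does.
 */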
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

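/*
 * Synchronous block-device read using an on-stack bio; returns only
 * once the read has completed.
 */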
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
	count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
	count_vm_events(PSWPIN, folio_nr_pages(folio));
	submit_bio(bio);
}

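/*
 * Read a folio from swap.  The zeromap and zswap are tried first, as
 * neither needs device IO; otherwise the read is dispatched to the
 * swap file or block device.  Submission time is accounted as memory
 * stall and thrashing delay where appropriate.
 */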
void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay.  When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	if (zswap_load(folio) != -ENOENT)
		goto finish;

	/* We have to read from slower devices.  Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

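/*
 * Submit a plugged batch of swap reads.  If ->swap_rw() completes
 * synchronously, call the completion handler ourselves.
 */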
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}