zswap: memcontrol: implement zswap writeback disabling
[linux-2.6-block.git] / mm / page_io.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

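/*
 * Shared tail of swap write completion: redirty the folio on error and
 * end writeback. Split out from end_swap_bio_write() because the
 * synchronous write path completes an on-stack bio and must not call
 * bio_put() on it.
 */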
static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim so folio_rotate_reclaimable()
		 * doesn't move the folio to the tail of the LRU.
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

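/*
 * Shared tail of swap read completion: mark the folio uptodate on
 * success and unlock it. As on the write side, the synchronous read
 * path completes an on-stack bio, so only end_swap_bio_read() calls
 * bio_put().
 */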
static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

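/*
 * Probe a swapfile's backing blocks with bmap() and record each
 * PAGE_SIZE-aligned, physically contiguous run as a swap extent.
 * A swapfile with holes (an unmapped block) is rejected with -EINVAL.
 */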
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}
	if (zswap_store(folio)) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		return 0;
	}
	/*
	 * zswap did not store the folio, and the folio's memcg has zswap
	 * writeback disabled (the cgroup v2 memory.zswap.writeback knob):
	 * do not fall through to the backing swap device. Redirty the
	 * folio and have reclaim activate it again instead.
	 */
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

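/*
 * Account a swap-out in vmstat: one PSWPOUT event per subpage, plus
 * THP_SWPOUT when a PMD-sized folio is written out in one piece.
 */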
static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

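/*
 * Associate the swap-out bio with the blkcg that the folio's memcg
 * maps to, so writeback to the swap device is charged against the
 * owning cgroup's I/O limits.
 */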
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)	do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

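/*
 * A swap_iocb batches up to SWAP_CLUSTER_MAX folios into a single
 * kiocb for the SWP_FS_OPS (swap-over-filesystem) paths. sio_pool is
 * a mempool so that a swap_iocb can always be allocated, even under
 * the memory pressure that triggered the swap-out.
 */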
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

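/*
 * Completion callback for batched swap writes issued through
 * ->swap_rw(). A short or failed write redirties every folio in the
 * batch so no data is lost; see the comment below on why this is not
 * treated as a hard error.
 */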
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty and avoid
		 * folio_rotate_reclaimable(). Rate-limit the
		 * messages, but do not flag PageError as in the
		 * normal direct-to-bio case, because the failure
		 * may be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

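/*
 * Write a folio to a SWP_FS_OPS swapfile. Folios contiguous on disk
 * are gathered into the swap_iocb plugged in wbc->swap_plug and
 * submitted together by swap_write_unplug().
 */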
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = folio_file_pos(folio);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

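/*
 * Write a folio to the swap block device with an on-stack bio and wait
 * for completion. Used for SWP_SYNCHRONOUS_IO devices, where avoiding
 * block-layer queueing overhead is worthwhile.
 */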
static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

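/*
 * Write a folio to the swap block device without waiting;
 * end_swap_bio_write() runs on completion and drops the bio reference.
 */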
static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}

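/*
 * Dispatch a swapcache folio to its backing store: through the
 * filesystem's ->swap_rw() for SWP_FS_OPS swapfiles, otherwise
 * directly to the block device, synchronously when the device
 * advertises SWP_SYNCHRONOUS_IO.
 */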
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	else if (sis->flags & SWP_SYNCHRONOUS_IO)
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}

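/*
 * Submit a plugged swap_iocb via ->swap_rw(). Any return other than
 * -EIOCBQUEUED means the filesystem completed the I/O synchronously
 * and will not invoke the completion callback, so call it here.
 */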
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

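/*
 * Completion callback for batched swap reads issued through
 * ->swap_rw(). Only a full-length read marks the folios uptodate;
 * anything short is treated as a read error for the whole batch.
 */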
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

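/*
 * Read a folio from a SWP_FS_OPS swapfile, batching reads contiguous
 * on disk into one swap_iocb when the caller passes a plug.
 */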
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = folio_file_pos(folio);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

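/*
 * Read a folio from the swap block device with an on-stack bio and
 * wait for it: the fast path for SWP_SYNCHRONOUS_IO devices.
 */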
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during the swap read because the oom killer
	 * may attempt to access it in the page-fault retry time check.
	 */
	get_task_struct(current);
	count_vm_event(PSWPIN);
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

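/*
 * Read a folio from the swap block device without waiting;
 * end_swap_bio_read() runs on completion and drops the bio reference.
 */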
static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
}

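/*
 * Read a folio into the swap cache: zswap is tried first, then the
 * filesystem or block-device backend. Submission time is accounted as
 * a memory stall (PSI) and as thrashing delay when the folio belongs
 * to the workingset.
 */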
void swap_read_folio(struct folio *folio, bool synchronous,
		struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup is IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (zswap_load(folio)) {
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	} else if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

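/*
 * Submit a plugged swap_iocb for reading; as on the write side, a
 * synchronous return means the completion callback must be called here.
 */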
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}