// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

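/*
 * Private bio set for f2fs, so that bio allocation for f2fs I/O does not
 * depend on the global fs_bio_set pool.
 */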
int __init f2fs_init_bioset(void)
{
	return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS);
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

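/*
 * Return true if writeback of @page is guaranteed to be persisted by the
 * next checkpoint: meta/node/directory pages, quota file data, and pages
 * tagged for GC writeback.
 */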
bool f2fs_is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page_folio(page)->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (fscrypt_is_bounce_page(page))
		return page_private_gcing(fscrypt_pagecache_page(page));

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
			page_private_gcing(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= BIT(0),
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= BIT(1),
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= BIT(2),
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
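	/* bitmask of STEP_* post-read steps enabled for this bio */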
	unsigned int enabled_steps;
	/*
	 * decompression_attempted tracks whether
	 * f2fs_end_read_compressed_page() has been called yet on the pages in
	 * the bio that belong to a compressed cluster.
	 */
	bool decompression_attempted;
	block_t fs_blkaddr;
};

/*
 * Update and unlock a bio's pages, and free the bio.
 *
 * This marks pages up-to-date only if there was no error in the bio (I/O error,
 * decryption error, or verity error), as indicated by bio->bi_status.
 *
 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
 * aren't marked up-to-date here, as decompression is done on a per-compression-
 * cluster basis rather than a per-bio basis.  Instead, we need only do two
 * things for each compressed page here: call f2fs_end_read_compressed_page()
 * with failed=true if an error occurred before it would normally have been
 * called (i.e., I/O error or decryption error, but *not* verity error), and
 * release the bio's reference to the decompress_io_ctx of the page's cluster.
 */
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct folio_iter fi;
	struct bio_post_read_ctx *ctx = bio->bi_private;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (f2fs_is_compressed_page(&folio->page)) {
			if (ctx && !ctx->decompression_attempted)
				f2fs_end_read_compressed_page(&folio->page,
							true, 0, in_task);
			f2fs_put_folio_dic(folio, in_task);
			continue;
		}

		dec_page_count(F2FS_F_SB(folio), __read_io_type(folio));
		folio_end_read(folio, bio->bi_status == BLK_STS_OK);
	}

	if (ctx)
		mempool_free(ctx, bio_post_read_ctx_pool);
	bio_put(bio);
}

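/*
 * Verify the non-compressed pages of a read bio with fs-verity, then finish
 * the bio.  Runs on the fs-verity workqueue.
 */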
static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readahead() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first.  This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity.  Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !fsverity_verify_page(page)) {
				bio->bi_status = BLK_STS_IOERR;
				break;
			}
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio, true);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio.  Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue.  This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio, in_task);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster.  STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page.  The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, false, blkaddr,
						      in_task);
		else
			all_compressed = false;

		blkaddr++;
	}

	ctx->decompression_attempted = true;

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

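/*
 * Work function for the post-read workqueue: performs decryption and/or
 * per-cluster decompression, then hands the bio off for optional verity
 * and completion.
 */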
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
		f2fs_finish_read_bio(bio, true);
		return;
	}

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx, true);

	f2fs_verify_and_finish_bio(bio, true);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;
	bool intask = in_task();

	iostat_update_and_unbind_ctx(bio);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO))
		bio->bi_status = BLK_STS_IOERR;

	if (bio->bi_status != BLK_STS_OK) {
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/*
		 * If decompression is the only enabled post-read step (no
		 * decryption), handle it inline here instead of deferring
		 * to the post-read workqueue.
		 */
		if (enabled_steps == STEP_DECOMPRESS &&
				!f2fs_low_mem_mode(sbi)) {
			f2fs_handle_step_decompress(ctx, intask);
		} else if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
			return;
		}
	}

	f2fs_verify_and_finish_bio(bio, intask);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct folio_iter fi;

	iostat_update_and_unbind_ctx(bio);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO))
		bio->bi_status = BLK_STS_IOERR;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		enum count_type type;

		if (fscrypt_is_bounce_folio(folio)) {
			struct folio *io_folio = folio;

			folio = fscrypt_pagecache_folio(io_folio);
			fscrypt_free_bounce_page(&io_folio->page);
		}

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(&folio->page)) {
			f2fs_compress_write_end_io(bio, &folio->page);
			continue;
		}
#endif

		type = WB_DATA_TYPE(&folio->page, false);

		if (unlikely(bio->bi_status != BLK_STS_OK)) {
			mapping_set_error(folio->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
		}

		f2fs_bug_on(sbi, is_node_folio(folio) &&
				folio->index != nid_of_node(&folio->page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, folio))
			f2fs_del_fsync_node_entry(sbi, folio);
		clear_page_private_gcing(&folio->page);
		folio_end_writeback(folio);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

#ifdef CONFIG_BLK_DEV_ZONED
static void f2fs_zone_write_end_io(struct bio *bio)
{
	struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;

	bio->bi_private = io->bi_private;
	complete(&io->zone_wait);
	f2fs_write_end_io(bio);
}
#endif

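/*
 * Map a filesystem block address to the backing block device, and optionally
 * return the sector offset within that device.
 */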
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
		block_t blk_addr, sector_t *sector)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}

	if (sector)
		*sector = SECTOR_FROM_BLOCK(blk_addr);
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
	unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
	struct folio *fio_folio = page_folio(fio->page);
	unsigned int fua_flag, meta_flag, io_flag;
	blk_opf_t op_flags = 0;

	if (fio->op != REQ_OP_WRITE)
		return 0;
	if (fio->type == DATA)
		io_flag = fio->sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = fio->sbi->node_io_flag;
	else
		return 0;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if (BIT(fio->temp) & meta_flag)
		op_flags |= REQ_META;
	if (BIT(fio->temp) & fua_flag)
		op_flags |= REQ_FUA;

	if (fio->type == DATA &&
	    F2FS_I(fio_folio->mapping->host)->ioprio_hint == F2FS_IOPRIO_WRITE)
		op_flags |= REQ_PRIO;

	return op_flags;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct block_device *bdev;
	sector_t sector;
	struct bio *bio;

	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
	bio = bio_alloc_bioset(bdev, npages,
				fio->op | fio->op_flags | f2fs_io_flags(fio),
				GFP_NOIO, &f2fs_bioset);
	bio->bi_iter.bi_sector = sector;
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
				 enum page_type type)
{
	WARN_ON_ONCE(!is_read_io(bio_op(bio)));
	trace_f2fs_submit_read_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
				  enum page_type type)
{
	WARN_ON_ONCE(is_read_io(bio_op(bio)));
	trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

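/* Submit the bio currently being merged in @io, if any, and reset it. */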
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op)) {
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
		f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
	} else {
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
		f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
	}
	io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct folio_iter fi;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_folio_all(fi, bio) {
		struct folio *target = fi.folio;

		if (fscrypt_is_bounce_folio(target)) {
			target = fscrypt_pagecache_folio(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(&target->page)) {
			target = f2fs_compress_control_folio(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == &target->page)
			return true;
		if (ino && ino == ino_of_node(&target->page))
			return true;
	}

	return false;
}

int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
				array_size(n, sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i])
			return -ENOMEM;

		for (j = HOT; j < n; j++) {
			struct f2fs_bio_info *io = &sbi->write_io[i][j];

			init_f2fs_rwsem(&io->io_rwsem);
			io->sbi = sbi;
			io->bio = NULL;
			io->last_block_in_bio = 0;
			spin_lock_init(&io->io_lock);
			INIT_LIST_HEAD(&io->io_list);
			INIT_LIST_HEAD(&io->bio_list);
			init_f2fs_rwsem(&io->bio_list_lock);
#ifdef CONFIG_BLK_DEV_ZONED
			init_completion(&io->zone_wait);
			io->zone_pending_bio = NULL;
			io->bi_private = NULL;
#endif
		}
	}

	return 0;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	f2fs_down_write(&io->io_rwsem);

	if (!io->bio)
		goto unlock_out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
unlock_out:
	f2fs_up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct folio *fio_folio = page_folio(fio->page);
	struct folio *data_folio = fio->encrypted_page ?
			page_folio(fio->encrypted_page) : fio_folio;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_folio_bio(data_folio, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host,
			fio_folio->index, fio, GFP_NOIO);
	bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0);

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false));

	if (is_read_io(bio_op(bio)))
		f2fs_submit_read_bio(fio->sbi, bio, fio->type);
	else
		f2fs_submit_write_bio(fio->sbi, bio, fio->type);
	return 0;
}

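/*
 * Check whether @cur_blkaddr directly follows the last block in @bio on the
 * same device, and the bio has not yet reached the configured size limit.
 */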
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	f2fs_down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	f2fs_up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

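/*
 * Try to merge @page into an in-flight in-place-update bio tracked in the
 * per-temperature bio lists; if the bio cannot accept the page, submit the
 * bio and return -EAGAIN so the caller allocates a new one.
 */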
static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct folio *fio_folio = page_folio(fio->page);
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio_folio->mapping->host,
					fio_folio->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			f2fs_submit_write_bio(sbi, *bio, DATA);
			break;
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct folio *folio)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	f2fs_bug_on(sbi, !target && !folio);

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		f2fs_down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								&folio->page, 0);
			if (found)
				break;
		}
		f2fs_up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								&folio->page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (found)
		f2fs_submit_write_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;
	struct folio *folio = page_folio(fio->page);

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_folio_bio(page_folio(page), fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(bio, folio->mapping->host,
				folio->index, fio, GFP_NOIO);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, folio, folio_size(folio));

	inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
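/* Return true if @blkaddr is the last block of a sequential zone. */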
static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int devi = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkaddr);
		if (blkaddr < FDEV(devi).start_blk ||
		    blkaddr > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkaddr);
			return false;
		}
		blkaddr -= FDEV(devi).start_blk;
		bdev = FDEV(devi).bdev;
	}
	return bdev_is_zoned(bdev) &&
		f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
		(blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
}
#endif

void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;
	enum count_type type;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	f2fs_down_write(&io->io_rwsem);
next:
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
		wait_for_completion_io(&io->zone_wait);
		bio_put(io->zone_pending_bio);
		io->zone_pending_bio = NULL;
		io->bi_private = NULL;
	}
#endif

	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = 1;

	type = WB_DATA_TYPE(bio_page, fio->compressed_page);
	inc_page_count(sbi, type);

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio_inode(fio),
				       page_folio(bio_page)->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio_inode(fio),
				page_folio(bio_page)->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
					 PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_folio_write(page_folio(fio->page), fio);
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
			is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
		bio_get(io->bio);
		reinit_completion(&io->zone_wait);
		io->bi_private = io->bio->bi_private;
		io->bio->bi_private = io;
		io->bio->bi_end_io = f2fs_zone_write_end_io;
		io->zone_pending_bio = io->bio;
		__submit_merged_bio(io);
	}
#endif
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, blk_opf_t op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;
	sector_t sector;
	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);

	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
			       REQ_OP_READ | op_flag,
			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
	bio->bi_iter.bi_sector = sector;
	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
	bio->bi_end_io = f2fs_read_end_io;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters.  We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		ctx->decompression_attempted = false;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This can handle encrypted reads as well */
static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
				 block_t blkaddr, blk_opf_t op_flags,
				 bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					folio->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
		iostat_update_and_unbind_ctx(bio);
		if (bio->bi_private)
			mempool_free(bio->bi_private, bio_post_read_ctx_pool);
		bio_put(bio);
		return -EFAULT;
	}
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_submit_read_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	__le32 *addr = get_dnode_addr(dn->inode, dn->node_folio);

	dn->data_blkaddr = blkaddr;
	addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_folio
 *  ->node_folio
 *   update block addresses in the node folio
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);
	__set_data_blkaddr(dn, blkaddr);
	if (folio_mark_dirty(dn->node_folio))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	f2fs_set_data_blkaddr(dn, blkaddr);
	f2fs_update_read_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	err = inc_valid_block_count(sbi, dn->inode, &count, true);
	if (unlikely(err))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_folio_wait_writeback(dn->node_folio, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			__set_data_blkaddr(dn, NEW_ADDR);
			count--;
		}
	}

	if (folio_mark_dirty(dn->node_folio))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_folio ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

struct folio *f2fs_get_read_data_folio(struct inode *inode, pgoff_t index,
		blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct folio *folio;
	int err;

	folio = f2fs_grab_cache_folio(mapping, index, for_write);
	if (IS_ERR(folio))
		return folio;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT && next_pgofs)
			*next_pgofs = f2fs_get_next_page_offset(&dn, index);
		goto put_err;
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		if (next_pgofs)
			*next_pgofs = index + 1;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto put_err;
	}
got_it:
	if (folio_test_uptodate(folio)) {
		folio_unlock(folio);
		return folio;
	}

	/*
	 * A new dentry page may have been allocated but not yet written,
	 * because its new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_folio ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		folio_zero_segment(folio, 0, folio_size(folio));
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);
		folio_unlock(folio);
		return folio;
	}

	err = f2fs_submit_page_read(inode, folio, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return folio;

put_err:
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);
}

struct folio *f2fs_find_data_folio(struct inode *inode, pgoff_t index,
					pgoff_t *next_pgofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_ACCESSED, 0);
	if (IS_ERR(folio))
		goto read;
	if (folio_test_uptodate(folio))
		return folio;
	f2fs_folio_put(folio, false);

read:
	folio = f2fs_get_read_data_folio(inode, index, 0, false, next_pgofs);
	if (IS_ERR(folio))
		return folio;

	if (folio_test_uptodate(folio))
		return folio;

	folio_wait_locked(folio);
	if (unlikely(!folio_test_uptodate(folio))) {
		f2fs_folio_put(folio, false);
		return ERR_PTR(-EIO);
	}
	return folio;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct folio *f2fs_get_lock_data_folio(struct inode *inode, pgoff_t index,
					bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;

	folio = f2fs_get_read_data_folio(inode, index, 0, for_write, NULL);
	if (IS_ERR(folio))
		return folio;

	/* wait for read completion */
	folio_lock(folio);
	if (unlikely(folio->mapping != mapping || !folio_test_uptodate(folio))) {
		f2fs_folio_put(folio, true);
		return ERR_PTR(-EIO);
	}
	return folio;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ifolio is set only by make_empty_dir, and if any error occurs,
 * ifolio is released by this function.
 */
struct folio *f2fs_get_new_data_folio(struct inode *inode,
		struct folio *ifolio, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct folio *folio;
	struct dnode_of_data dn;
	int err;

	folio = f2fs_grab_cache_folio(mapping, index, true);
	if (IS_ERR(folio)) {
		/*
		 * before exiting, we should make sure ifolio will be released
		 * if any error occurs.
		 */
		f2fs_folio_put(ifolio, true);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ifolio, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_folio_put(folio, true);
		return ERR_PTR(err);
	}
	if (!ifolio)
		f2fs_put_dnode(&dn);

	if (folio_test_uptodate(folio))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		folio_zero_segment(folio, 0, folio_size(folio));
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);
	} else {
		f2fs_folio_put(folio, true);

		/* if ifolio exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ifolio);
		folio = f2fs_get_lock_data_folio(inode, index, true);
		if (IS_ERR(folio))
			return folio;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return folio;
}

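/*
 * Allocate a new data block for @dn out of place, invalidating any cached
 * copy of the old block and recording the new address in the dnode and the
 * read extent cache.
 */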
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	if (dn->data_blkaddr == NULL_ADDR) {
		err = inc_valid_block_count(sbi, dn->inode, &count, true);
		if (unlikely(err))
			return err;
	}

	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
				&dn->data_blkaddr, &sum, seg_type, NULL);
	if (err)
		return err;

	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1);

	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
	return 0;
}

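/*
 * AIO preallocation only takes the lighter node_change lock; all other
 * mapping requests must block checkpointing via f2fs_lock_op().
 */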
static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		f2fs_down_read(&sbi->node_change);
	else
		f2fs_lock_op(sbi);
}

static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		f2fs_up_read(&sbi->node_change);
	else
		f2fs_unlock_op(sbi);
}

int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err = 0;

	f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
	if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
						&dn->data_blkaddr))
		err = f2fs_reserve_block(dn, index);
	f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);

	return err;
}

static int f2fs_map_no_dnode(struct inode *inode,
		struct f2fs_map_blocks *map, struct dnode_of_data *dn,
		pgoff_t pgoff)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/*
	 * There is one exceptional case: read_node_page() may return -ENOENT
	 * because the filesystem has been shut down or hit a checkpoint
	 * error; return -EIO in that case.
	 */
	if (map->m_may_create &&
	    (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
		return -EIO;

	if (map->m_next_pgofs)
		*map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
	if (map->m_next_extent)
		*map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
	return 0;
}

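/*
 * Fast path for lookups: serve the mapping from the read extent cache.
 * Returns true and fills @map on a cache hit.
 */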
static bool f2fs_map_blocks_cached(struct inode *inode,
		struct f2fs_map_blocks *map, int flag)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int maxblocks = map->m_len;
	pgoff_t pgoff = (pgoff_t)map->m_lblk;
	struct extent_info ei = {};

	if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
		return false;

	map->m_pblk = ei.blk + pgoff - ei.fofs;
	map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
	map->m_flags = F2FS_MAP_MAPPED;
	if (map->m_next_extent)
		*map->m_next_extent = pgoff + map->m_len;

	/* for hardware encryption, and to avoid potential issues in the future */
	if (flag == F2FS_GET_BLOCK_DIO)
		f2fs_wait_on_block_writeback_range(inode,
					map->m_pblk, map->m_len);

	if (f2fs_allow_multi_device_dio(sbi, flag)) {
		int bidx = f2fs_target_device_index(sbi, map->m_pblk);
		struct f2fs_dev_info *dev = &sbi->devs[bidx];

		map->m_bdev = dev->bdev;
		map->m_pblk -= dev->start_blk;
		map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
	} else {
		map->m_bdev = inode->i_sb->s_bdev;
	}
	return true;
}

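/*
 * Decide whether @blkaddr can extend the extent currently accumulated in
 * @map, staying on the same device for multi-device DIO.
 */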
| 1518 | static bool map_is_mergeable(struct f2fs_sb_info *sbi, |
| 1519 | struct f2fs_map_blocks *map, |
| 1520 | block_t blkaddr, int flag, int bidx, |
| 1521 | int ofs) |
| 1522 | { |
| 1523 | if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev) |
| 1524 | return false; |
| 1525 | if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) |
| 1526 | return true; |
| 1527 | if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) |
| 1528 | return true; |
| 1529 | if (flag == F2FS_GET_BLOCK_PRE_DIO) |
| 1530 | return true; |
| 1531 | if (flag == F2FS_GET_BLOCK_DIO && |
| 1532 | map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR) |
| 1533 | return true; |
| 1534 | return false; |
| 1535 | } |
| 1536 | |
| 1537 | /* |
| 1538 | * f2fs_map_blocks() tries to find or build mapping relationship which |
| 1539 | * maps continuous logical blocks to physical blocks, and return such |
| 1540 | * info via f2fs_map_blocks structure. |
| 1541 | */ |
| 1542 | int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag) |
| 1543 | { |
| 1544 | unsigned int maxblocks = map->m_len; |
| 1545 | struct dnode_of_data dn; |
| 1546 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 1547 | int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE; |
| 1548 | pgoff_t pgofs, end_offset, end; |
| 1549 | int err = 0, ofs = 1; |
| 1550 | unsigned int ofs_in_node, last_ofs_in_node; |
| 1551 | blkcnt_t prealloc; |
| 1552 | block_t blkaddr; |
| 1553 | unsigned int start_pgofs; |
| 1554 | int bidx = 0; |
| 1555 | bool is_hole; |
| 1556 | |
| 1557 | if (!maxblocks) |
| 1558 | return 0; |
| 1559 | |
| 1560 | if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag)) |
| 1561 | goto out; |
| 1562 | |
| 1563 | map->m_bdev = inode->i_sb->s_bdev; |
| 1564 | map->m_multidev_dio = |
| 1565 | f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag); |
| 1566 | |
| 1567 | map->m_len = 0; |
| 1568 | map->m_flags = 0; |
| 1569 | |
| 1570 | /* it only supports block size == page size */ |
| 1571 | pgofs = (pgoff_t)map->m_lblk; |
| 1572 | end = pgofs + maxblocks; |
| 1573 | |
| 1574 | next_dnode: |
| 1575 | if (map->m_may_create) |
| 1576 | f2fs_map_lock(sbi, flag); |
| 1577 | |
| 1578 | /* When reading holes, we need its node page */ |
| 1579 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
| 1580 | err = f2fs_get_dnode_of_data(&dn, pgofs, mode); |
| 1581 | if (err) { |
| 1582 | if (flag == F2FS_GET_BLOCK_BMAP) |
| 1583 | map->m_pblk = 0; |
| 1584 | if (err == -ENOENT) |
| 1585 | err = f2fs_map_no_dnode(inode, map, &dn, pgofs); |
| 1586 | goto unlock_out; |
| 1587 | } |
| 1588 | |
| 1589 | start_pgofs = pgofs; |
| 1590 | prealloc = 0; |
| 1591 | last_ofs_in_node = ofs_in_node = dn.ofs_in_node; |
| 1592 | end_offset = ADDRS_PER_PAGE(&dn.node_folio->page, inode); |
| 1593 | |
| 1594 | next_block: |
| 1595 | blkaddr = f2fs_data_blkaddr(&dn); |
| 1596 | is_hole = !__is_valid_data_blkaddr(blkaddr); |
| 1597 | if (!is_hole && |
| 1598 | !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { |
| 1599 | err = -EFSCORRUPTED; |
| 1600 | goto sync_out; |
| 1601 | } |
| 1602 | |
| 1603 | /* use out-place-update for direct IO under LFS mode */ |
| 1604 | if (map->m_may_create && (is_hole || |
| 1605 | (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && |
| 1606 | !f2fs_is_pinned_file(inode)))) { |
| 1607 | if (unlikely(f2fs_cp_error(sbi))) { |
| 1608 | err = -EIO; |
| 1609 | goto sync_out; |
| 1610 | } |
| 1611 | |
| 1612 | switch (flag) { |
| 1613 | case F2FS_GET_BLOCK_PRE_AIO: |
| 1614 | if (blkaddr == NULL_ADDR) { |
| 1615 | prealloc++; |
| 1616 | last_ofs_in_node = dn.ofs_in_node; |
| 1617 | } |
| 1618 | break; |
| 1619 | case F2FS_GET_BLOCK_PRE_DIO: |
| 1620 | case F2FS_GET_BLOCK_DIO: |
| 1621 | err = __allocate_data_block(&dn, map->m_seg_type); |
| 1622 | if (err) |
| 1623 | goto sync_out; |
| 1624 | if (flag == F2FS_GET_BLOCK_PRE_DIO) |
| 1625 | file_need_truncate(inode); |
| 1626 | set_inode_flag(inode, FI_APPEND_WRITE); |
| 1627 | break; |
| 1628 | default: |
| 1629 | WARN_ON_ONCE(1); |
| 1630 | err = -EIO; |
| 1631 | goto sync_out; |
| 1632 | } |
| 1633 | |
| 1634 | blkaddr = dn.data_blkaddr; |
| 1635 | if (is_hole) |
| 1636 | map->m_flags |= F2FS_MAP_NEW; |
| 1637 | } else if (is_hole) { |
| 1638 | if (f2fs_compressed_file(inode) && |
| 1639 | f2fs_sanity_check_cluster(&dn)) { |
| 1640 | err = -EFSCORRUPTED; |
| 1641 | f2fs_handle_error(sbi, |
| 1642 | ERROR_CORRUPTED_CLUSTER); |
| 1643 | goto sync_out; |
| 1644 | } |
| 1645 | |
| 1646 | switch (flag) { |
| 1647 | case F2FS_GET_BLOCK_PRECACHE: |
| 1648 | goto sync_out; |
| 1649 | case F2FS_GET_BLOCK_BMAP: |
| 1650 | map->m_pblk = 0; |
| 1651 | goto sync_out; |
| 1652 | case F2FS_GET_BLOCK_FIEMAP: |
| 1653 | if (blkaddr == NULL_ADDR) { |
| 1654 | if (map->m_next_pgofs) |
| 1655 | *map->m_next_pgofs = pgofs + 1; |
| 1656 | goto sync_out; |
| 1657 | } |
| 1658 | break; |
| 1659 | case F2FS_GET_BLOCK_DIO: |
| 1660 | if (map->m_next_pgofs) |
| 1661 | *map->m_next_pgofs = pgofs + 1; |
| 1662 | break; |
| 1663 | default: |
| 1664 | /* for defragment case */ |
| 1665 | if (map->m_next_pgofs) |
| 1666 | *map->m_next_pgofs = pgofs + 1; |
| 1667 | goto sync_out; |
| 1668 | } |
| 1669 | } |
| 1670 | |
| 1671 | if (flag == F2FS_GET_BLOCK_PRE_AIO) |
| 1672 | goto skip; |
| 1673 | |
| 1674 | if (map->m_multidev_dio) |
| 1675 | bidx = f2fs_target_device_index(sbi, blkaddr); |
| 1676 | |
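	/*
	 * Assemble the extent: the first mapped block starts it
	 * (m_len == 1); each following block extends it while it stays
	 * mergeable with the current extent; the first non-mergeable
	 * block ends the walk via sync_out.
	 */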
| 1677 | if (map->m_len == 0) { |
| 1678 | /* reserved delalloc block should be mapped for fiemap. */ |
| 1679 | if (blkaddr == NEW_ADDR) |
| 1680 | map->m_flags |= F2FS_MAP_DELALLOC; |
		/* in the DIO read hole case, the blocks should not be mapped. */
| 1682 | if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create)) |
| 1683 | map->m_flags |= F2FS_MAP_MAPPED; |
| 1684 | |
| 1685 | map->m_pblk = blkaddr; |
| 1686 | map->m_len = 1; |
| 1687 | |
| 1688 | if (map->m_multidev_dio) |
| 1689 | map->m_bdev = FDEV(bidx).bdev; |
| 1690 | } else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) { |
| 1691 | ofs++; |
| 1692 | map->m_len++; |
| 1693 | } else { |
| 1694 | goto sync_out; |
| 1695 | } |
| 1696 | |
| 1697 | skip: |
| 1698 | dn.ofs_in_node++; |
| 1699 | pgofs++; |
| 1700 | |
| 1701 | /* preallocate blocks in batch for one dnode page */ |
| 1702 | if (flag == F2FS_GET_BLOCK_PRE_AIO && |
| 1703 | (pgofs == end || dn.ofs_in_node == end_offset)) { |
| 1704 | |
| 1705 | dn.ofs_in_node = ofs_in_node; |
| 1706 | err = f2fs_reserve_new_blocks(&dn, prealloc); |
| 1707 | if (err) |
| 1708 | goto sync_out; |
| 1709 | |
| 1710 | map->m_len += dn.ofs_in_node - ofs_in_node; |
| 1711 | if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) { |
| 1712 | err = -ENOSPC; |
| 1713 | goto sync_out; |
| 1714 | } |
| 1715 | dn.ofs_in_node = end_offset; |
| 1716 | } |
| 1717 | |
| 1718 | if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) && |
| 1719 | map->m_may_create) { |
| 1720 | /* the next block to be allocated may not be contiguous. */ |
| 1721 | if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) == |
| 1722 | CAP_BLKS_PER_SEC(sbi) - 1) |
| 1723 | goto sync_out; |
| 1724 | } |
| 1725 | |
| 1726 | if (pgofs >= end) |
| 1727 | goto sync_out; |
| 1728 | else if (dn.ofs_in_node < end_offset) |
| 1729 | goto next_block; |
| 1730 | |
| 1731 | if (flag == F2FS_GET_BLOCK_PRECACHE) { |
| 1732 | if (map->m_flags & F2FS_MAP_MAPPED) { |
| 1733 | unsigned int ofs = start_pgofs - map->m_lblk; |
| 1734 | |
| 1735 | f2fs_update_read_extent_cache_range(&dn, |
| 1736 | start_pgofs, map->m_pblk + ofs, |
| 1737 | map->m_len - ofs); |
| 1738 | } |
| 1739 | } |
| 1740 | |
| 1741 | f2fs_put_dnode(&dn); |
| 1742 | |
| 1743 | if (map->m_may_create) { |
| 1744 | f2fs_map_unlock(sbi, flag); |
| 1745 | f2fs_balance_fs(sbi, dn.node_changed); |
| 1746 | } |
| 1747 | goto next_dnode; |
| 1748 | |
| 1749 | sync_out: |
| 1750 | |
| 1751 | if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) { |
| 1752 | /* |
		 * this is needed for hardware encryption, but is done
		 * unconditionally to avoid potential issues in the future
| 1755 | */ |
| 1756 | f2fs_wait_on_block_writeback_range(inode, |
| 1757 | map->m_pblk, map->m_len); |
| 1758 | |
| 1759 | if (map->m_multidev_dio) { |
| 1760 | block_t blk_addr = map->m_pblk; |
| 1761 | |
| 1762 | bidx = f2fs_target_device_index(sbi, map->m_pblk); |
| 1763 | |
| 1764 | map->m_bdev = FDEV(bidx).bdev; |
| 1765 | map->m_pblk -= FDEV(bidx).start_blk; |
| 1766 | |
| 1767 | if (map->m_may_create) |
| 1768 | f2fs_update_device_state(sbi, inode->i_ino, |
| 1769 | blk_addr, map->m_len); |
| 1770 | |
| 1771 | f2fs_bug_on(sbi, blk_addr + map->m_len > |
| 1772 | FDEV(bidx).end_blk + 1); |
| 1773 | } |
| 1774 | } |
| 1775 | |
| 1776 | if (flag == F2FS_GET_BLOCK_PRECACHE) { |
| 1777 | if (map->m_flags & F2FS_MAP_MAPPED) { |
| 1778 | unsigned int ofs = start_pgofs - map->m_lblk; |
| 1779 | |
| 1780 | f2fs_update_read_extent_cache_range(&dn, |
| 1781 | start_pgofs, map->m_pblk + ofs, |
| 1782 | map->m_len - ofs); |
| 1783 | } |
| 1784 | if (map->m_next_extent) |
| 1785 | *map->m_next_extent = pgofs + 1; |
| 1786 | } |
| 1787 | f2fs_put_dnode(&dn); |
| 1788 | unlock_out: |
| 1789 | if (map->m_may_create) { |
| 1790 | f2fs_map_unlock(sbi, flag); |
| 1791 | f2fs_balance_fs(sbi, dn.node_changed); |
| 1792 | } |
| 1793 | out: |
| 1794 | trace_f2fs_map_blocks(inode, map, flag, err); |
| 1795 | return err; |
| 1796 | } |
| 1797 | |
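/*
 * Return true if the whole byte range [pos, pos + len) lies within
 * i_size and is backed by already-mapped blocks, i.e. the write is a
 * pure overwrite and needs no new block allocation.
 */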
| 1798 | bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len) |
| 1799 | { |
| 1800 | struct f2fs_map_blocks map; |
| 1801 | block_t last_lblk; |
| 1802 | int err; |
| 1803 | |
| 1804 | if (pos + len > i_size_read(inode)) |
| 1805 | return false; |
| 1806 | |
| 1807 | map.m_lblk = F2FS_BYTES_TO_BLK(pos); |
| 1808 | map.m_next_pgofs = NULL; |
| 1809 | map.m_next_extent = NULL; |
| 1810 | map.m_seg_type = NO_CHECK_TYPE; |
| 1811 | map.m_may_create = false; |
| 1812 | last_lblk = F2FS_BLK_ALIGN(pos + len); |
| 1813 | |
| 1814 | while (map.m_lblk < last_lblk) { |
| 1815 | map.m_len = last_lblk - map.m_lblk; |
| 1816 | err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT); |
| 1817 | if (err || map.m_len == 0) |
| 1818 | return false; |
| 1819 | map.m_lblk += map.m_len; |
| 1820 | } |
| 1821 | return true; |
| 1822 | } |
| 1823 | |
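/*
 * Report xattr extents for FIEMAP_FLAG_XATTR: first the inline xattr
 * area inside the inode block (if any), then the separate xattr node
 * block pointed to by i_xattr_nid.
 */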
| 1824 | static int f2fs_xattr_fiemap(struct inode *inode, |
| 1825 | struct fiemap_extent_info *fieinfo) |
| 1826 | { |
| 1827 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 1828 | struct node_info ni; |
| 1829 | __u64 phys = 0, len; |
| 1830 | __u32 flags; |
| 1831 | nid_t xnid = F2FS_I(inode)->i_xattr_nid; |
| 1832 | int err = 0; |
| 1833 | |
| 1834 | if (f2fs_has_inline_xattr(inode)) { |
| 1835 | int offset; |
| 1836 | struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), |
| 1837 | inode->i_ino, false); |
| 1838 | |
| 1839 | if (IS_ERR(folio)) |
| 1840 | return PTR_ERR(folio); |
| 1841 | |
| 1842 | err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false); |
| 1843 | if (err) { |
| 1844 | f2fs_folio_put(folio, true); |
| 1845 | return err; |
| 1846 | } |
| 1847 | |
| 1848 | phys = F2FS_BLK_TO_BYTES(ni.blk_addr); |
| 1849 | offset = offsetof(struct f2fs_inode, i_addr) + |
| 1850 | sizeof(__le32) * (DEF_ADDRS_PER_INODE - |
| 1851 | get_inline_xattr_addrs(inode)); |
| 1852 | |
| 1853 | phys += offset; |
| 1854 | len = inline_xattr_size(inode); |
| 1855 | |
| 1856 | f2fs_folio_put(folio, true); |
| 1857 | |
| 1858 | flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED; |
| 1859 | |
| 1860 | if (!xnid) |
| 1861 | flags |= FIEMAP_EXTENT_LAST; |
| 1862 | |
| 1863 | err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); |
| 1864 | trace_f2fs_fiemap(inode, 0, phys, len, flags, err); |
| 1865 | if (err) |
| 1866 | return err; |
| 1867 | } |
| 1868 | |
| 1869 | if (xnid) { |
| 1870 | struct folio *folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), |
| 1871 | xnid, false); |
| 1872 | |
| 1873 | if (IS_ERR(folio)) |
| 1874 | return PTR_ERR(folio); |
| 1875 | |
| 1876 | err = f2fs_get_node_info(sbi, xnid, &ni, false); |
| 1877 | if (err) { |
| 1878 | f2fs_folio_put(folio, true); |
| 1879 | return err; |
| 1880 | } |
| 1881 | |
| 1882 | phys = F2FS_BLK_TO_BYTES(ni.blk_addr); |
| 1883 | len = inode->i_sb->s_blocksize; |
| 1884 | |
| 1885 | f2fs_folio_put(folio, true); |
| 1886 | |
| 1887 | flags = FIEMAP_EXTENT_LAST; |
| 1888 | } |
| 1889 | |
| 1890 | if (phys) { |
| 1891 | err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); |
| 1892 | trace_f2fs_fiemap(inode, 0, phys, len, flags, err); |
| 1893 | } |
| 1894 | |
| 1895 | return (err < 0 ? err : 0); |
| 1896 | } |
| 1897 | |
| 1898 | int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
| 1899 | u64 start, u64 len) |
| 1900 | { |
| 1901 | struct f2fs_map_blocks map; |
| 1902 | sector_t start_blk, last_blk, blk_len, max_len; |
| 1903 | pgoff_t next_pgofs; |
| 1904 | u64 logical = 0, phys = 0, size = 0; |
| 1905 | u32 flags = 0; |
| 1906 | int ret = 0; |
| 1907 | bool compr_cluster = false, compr_appended; |
| 1908 | unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; |
| 1909 | unsigned int count_in_cluster = 0; |
| 1910 | loff_t maxbytes; |
| 1911 | |
| 1912 | if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { |
| 1913 | ret = f2fs_precache_extents(inode); |
| 1914 | if (ret) |
| 1915 | return ret; |
| 1916 | } |
| 1917 | |
| 1918 | ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR); |
| 1919 | if (ret) |
| 1920 | return ret; |
| 1921 | |
| 1922 | inode_lock_shared(inode); |
| 1923 | |
| 1924 | maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); |
| 1925 | if (start > maxbytes) { |
| 1926 | ret = -EFBIG; |
| 1927 | goto out; |
| 1928 | } |
| 1929 | |
| 1930 | if (len > maxbytes || (maxbytes - len) < start) |
| 1931 | len = maxbytes - start; |
| 1932 | |
| 1933 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { |
| 1934 | ret = f2fs_xattr_fiemap(inode, fieinfo); |
| 1935 | goto out; |
| 1936 | } |
| 1937 | |
| 1938 | if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) { |
| 1939 | ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len); |
| 1940 | if (ret != -EAGAIN) |
| 1941 | goto out; |
| 1942 | } |
| 1943 | |
| 1944 | start_blk = F2FS_BYTES_TO_BLK(start); |
| 1945 | last_blk = F2FS_BYTES_TO_BLK(start + len - 1); |
| 1946 | blk_len = last_blk - start_blk + 1; |
| 1947 | max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk; |
| 1948 | |
| 1949 | next: |
| 1950 | memset(&map, 0, sizeof(map)); |
| 1951 | map.m_lblk = start_blk; |
| 1952 | map.m_len = blk_len; |
| 1953 | map.m_next_pgofs = &next_pgofs; |
| 1954 | map.m_seg_type = NO_CHECK_TYPE; |
| 1955 | |
| 1956 | if (compr_cluster) { |
| 1957 | map.m_lblk += 1; |
| 1958 | map.m_len = cluster_size - count_in_cluster; |
| 1959 | } |
| 1960 | |
| 1961 | ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP); |
| 1962 | if (ret) |
| 1963 | goto out; |
| 1964 | |
| 1965 | /* HOLE */ |
| 1966 | if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) { |
| 1967 | start_blk = next_pgofs; |
| 1968 | |
| 1969 | if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes) |
| 1970 | goto prep_next; |
| 1971 | |
| 1972 | flags |= FIEMAP_EXTENT_LAST; |
| 1973 | } |
| 1974 | |
| 1975 | /* |
| 1976 | * current extent may cross boundary of inquiry, increase len to |
| 1977 | * requery. |
| 1978 | */ |
| 1979 | if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) && |
| 1980 | map.m_lblk + map.m_len - 1 == last_blk && |
| 1981 | blk_len != max_len) { |
| 1982 | blk_len = max_len; |
| 1983 | goto next; |
| 1984 | } |
| 1985 | |
| 1986 | compr_appended = false; |
| 1987 | /* In a case of compressed cluster, append this to the last extent */ |
| 1988 | if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) || |
| 1989 | !(map.m_flags & F2FS_MAP_FLAGS))) { |
| 1990 | compr_appended = true; |
| 1991 | goto skip_fill; |
| 1992 | } |
| 1993 | |
| 1994 | if (size) { |
| 1995 | flags |= FIEMAP_EXTENT_MERGED; |
| 1996 | if (IS_ENCRYPTED(inode)) |
| 1997 | flags |= FIEMAP_EXTENT_DATA_ENCRYPTED; |
| 1998 | |
| 1999 | ret = fiemap_fill_next_extent(fieinfo, logical, |
| 2000 | phys, size, flags); |
| 2001 | trace_f2fs_fiemap(inode, logical, phys, size, flags, ret); |
| 2002 | if (ret) |
| 2003 | goto out; |
| 2004 | size = 0; |
| 2005 | } |
| 2006 | |
| 2007 | if (start_blk > last_blk) |
| 2008 | goto out; |
| 2009 | |
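	/*
	 * Compressed-cluster handling: seeing COMPRESS_ADDR starts a
	 * cluster; the following blocks of that cluster are accumulated
	 * into one FIEMAP_EXTENT_ENCODED extent, and trailing
	 * hole/delalloc blocks are appended so the whole cluster is
	 * reported as a single extent.
	 */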
| 2010 | skip_fill: |
| 2011 | if (map.m_pblk == COMPRESS_ADDR) { |
| 2012 | compr_cluster = true; |
| 2013 | count_in_cluster = 1; |
| 2014 | } else if (compr_appended) { |
| 2015 | unsigned int appended_blks = cluster_size - |
| 2016 | count_in_cluster + 1; |
| 2017 | size += F2FS_BLK_TO_BYTES(appended_blks); |
| 2018 | start_blk += appended_blks; |
| 2019 | compr_cluster = false; |
| 2020 | } else { |
| 2021 | logical = F2FS_BLK_TO_BYTES(start_blk); |
| 2022 | phys = __is_valid_data_blkaddr(map.m_pblk) ? |
| 2023 | F2FS_BLK_TO_BYTES(map.m_pblk) : 0; |
| 2024 | size = F2FS_BLK_TO_BYTES(map.m_len); |
| 2025 | flags = 0; |
| 2026 | |
| 2027 | if (compr_cluster) { |
| 2028 | flags = FIEMAP_EXTENT_ENCODED; |
| 2029 | count_in_cluster += map.m_len; |
| 2030 | if (count_in_cluster == cluster_size) { |
| 2031 | compr_cluster = false; |
| 2032 | size += F2FS_BLKSIZE; |
| 2033 | } |
| 2034 | } else if (map.m_flags & F2FS_MAP_DELALLOC) { |
| 2035 | flags = FIEMAP_EXTENT_UNWRITTEN; |
| 2036 | } |
| 2037 | |
| 2038 | start_blk += F2FS_BYTES_TO_BLK(size); |
| 2039 | } |
| 2040 | |
| 2041 | prep_next: |
| 2042 | cond_resched(); |
| 2043 | if (fatal_signal_pending(current)) |
| 2044 | ret = -EINTR; |
| 2045 | else |
| 2046 | goto next; |
| 2047 | out: |
| 2048 | if (ret == 1) |
| 2049 | ret = 0; |
| 2050 | |
| 2051 | inode_unlock_shared(inode); |
| 2052 | return ret; |
| 2053 | } |
| 2054 | |
| 2055 | static inline loff_t f2fs_readpage_limit(struct inode *inode) |
| 2056 | { |
| 2057 | if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode)) |
| 2058 | return F2FS_BLK_TO_BYTES(max_file_blocks(inode)); |
| 2059 | |
| 2060 | return i_size_read(inode); |
| 2061 | } |
| 2062 | |
| 2063 | static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac) |
| 2064 | { |
| 2065 | return rac ? REQ_RAHEAD : 0; |
| 2066 | } |
| 2067 | |
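/*
 * Read one folio: map its logical block (reusing the caller's previous
 * mapping result when possible), zero it out if it lies in a hole or
 * beyond EOF, and otherwise add it to the shared read bio, submitting
 * and reallocating the bio whenever the new block cannot be merged.
 */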
| 2068 | static int f2fs_read_single_page(struct inode *inode, struct folio *folio, |
| 2069 | unsigned nr_pages, |
| 2070 | struct f2fs_map_blocks *map, |
| 2071 | struct bio **bio_ret, |
| 2072 | sector_t *last_block_in_bio, |
| 2073 | struct readahead_control *rac) |
| 2074 | { |
| 2075 | struct bio *bio = *bio_ret; |
| 2076 | const unsigned int blocksize = F2FS_BLKSIZE; |
| 2077 | sector_t block_in_file; |
| 2078 | sector_t last_block; |
| 2079 | sector_t last_block_in_file; |
| 2080 | sector_t block_nr; |
| 2081 | pgoff_t index = folio->index; |
| 2082 | int ret = 0; |
| 2083 | |
| 2084 | block_in_file = (sector_t)index; |
| 2085 | last_block = block_in_file + nr_pages; |
| 2086 | last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) + |
| 2087 | blocksize - 1); |
| 2088 | if (last_block > last_block_in_file) |
| 2089 | last_block = last_block_in_file; |
| 2090 | |
	/* just zero out the page which is beyond EOF */
| 2092 | if (block_in_file >= last_block) |
| 2093 | goto zero_out; |
| 2094 | /* |
| 2095 | * Map blocks using the previous result first. |
| 2096 | */ |
| 2097 | if ((map->m_flags & F2FS_MAP_MAPPED) && |
| 2098 | block_in_file > map->m_lblk && |
| 2099 | block_in_file < (map->m_lblk + map->m_len)) |
| 2100 | goto got_it; |
| 2101 | |
| 2102 | /* |
| 2103 | * Then do more f2fs_map_blocks() calls until we are |
| 2104 | * done with this page. |
| 2105 | */ |
| 2106 | map->m_lblk = block_in_file; |
| 2107 | map->m_len = last_block - block_in_file; |
| 2108 | |
| 2109 | ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT); |
| 2110 | if (ret) |
| 2111 | goto out; |
| 2112 | got_it: |
| 2113 | if ((map->m_flags & F2FS_MAP_MAPPED)) { |
| 2114 | block_nr = map->m_pblk + block_in_file - map->m_lblk; |
| 2115 | folio_set_mappedtodisk(folio); |
| 2116 | |
| 2117 | if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, |
| 2118 | DATA_GENERIC_ENHANCE_READ)) { |
| 2119 | ret = -EFSCORRUPTED; |
| 2120 | goto out; |
| 2121 | } |
| 2122 | } else { |
| 2123 | zero_out: |
| 2124 | folio_zero_segment(folio, 0, folio_size(folio)); |
| 2125 | if (f2fs_need_verity(inode, index) && |
| 2126 | !fsverity_verify_folio(folio)) { |
| 2127 | ret = -EIO; |
| 2128 | goto out; |
| 2129 | } |
| 2130 | if (!folio_test_uptodate(folio)) |
| 2131 | folio_mark_uptodate(folio); |
| 2132 | folio_unlock(folio); |
| 2133 | goto out; |
| 2134 | } |
| 2135 | |
| 2136 | /* |
| 2137 | * This page will go to BIO. Do we need to send this |
| 2138 | * BIO off first? |
| 2139 | */ |
| 2140 | if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, |
| 2141 | *last_block_in_bio, block_nr) || |
| 2142 | !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) { |
| 2143 | submit_and_realloc: |
| 2144 | f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); |
| 2145 | bio = NULL; |
| 2146 | } |
	if (!bio) {
| 2148 | bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, |
| 2149 | f2fs_ra_op_flags(rac), index, |
| 2150 | false); |
| 2151 | if (IS_ERR(bio)) { |
| 2152 | ret = PTR_ERR(bio); |
| 2153 | bio = NULL; |
| 2154 | goto out; |
| 2155 | } |
| 2156 | } |
| 2157 | |
| 2158 | /* |
| 2159 | * If the page is under writeback, we need to wait for |
| 2160 | * its completion to see the correct decrypted data. |
| 2161 | */ |
| 2162 | f2fs_wait_on_block_writeback(inode, block_nr); |
| 2163 | |
| 2164 | if (!bio_add_folio(bio, folio, blocksize, 0)) |
| 2165 | goto submit_and_realloc; |
| 2166 | |
| 2167 | inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA); |
| 2168 | f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO, |
| 2169 | F2FS_BLKSIZE); |
| 2170 | *last_block_in_bio = block_nr; |
| 2171 | out: |
| 2172 | *bio_ret = bio; |
| 2173 | return ret; |
| 2174 | } |
| 2175 | |
| 2176 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2177 | int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, |
| 2178 | unsigned nr_pages, sector_t *last_block_in_bio, |
| 2179 | struct readahead_control *rac, bool for_write) |
| 2180 | { |
| 2181 | struct dnode_of_data dn; |
| 2182 | struct inode *inode = cc->inode; |
| 2183 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 2184 | struct bio *bio = *bio_ret; |
| 2185 | unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size; |
| 2186 | sector_t last_block_in_file; |
| 2187 | const unsigned int blocksize = F2FS_BLKSIZE; |
| 2188 | struct decompress_io_ctx *dic = NULL; |
| 2189 | struct extent_info ei = {}; |
| 2190 | bool from_dnode = true; |
| 2191 | int i; |
| 2192 | int ret = 0; |
| 2193 | |
| 2194 | if (unlikely(f2fs_cp_error(sbi))) { |
| 2195 | ret = -EIO; |
| 2196 | from_dnode = false; |
| 2197 | goto out_put_dnode; |
| 2198 | } |
| 2199 | |
| 2200 | f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc)); |
| 2201 | |
| 2202 | last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) + |
| 2203 | blocksize - 1); |
| 2204 | |
| 2205 | /* get rid of pages beyond EOF */ |
| 2206 | for (i = 0; i < cc->cluster_size; i++) { |
| 2207 | struct page *page = cc->rpages[i]; |
| 2208 | struct folio *folio; |
| 2209 | |
| 2210 | if (!page) |
| 2211 | continue; |
| 2212 | |
| 2213 | folio = page_folio(page); |
| 2214 | if ((sector_t)folio->index >= last_block_in_file) { |
| 2215 | folio_zero_segment(folio, 0, folio_size(folio)); |
| 2216 | if (!folio_test_uptodate(folio)) |
| 2217 | folio_mark_uptodate(folio); |
| 2218 | } else if (!folio_test_uptodate(folio)) { |
| 2219 | continue; |
| 2220 | } |
| 2221 | folio_unlock(folio); |
| 2222 | if (for_write) |
| 2223 | folio_put(folio); |
| 2224 | cc->rpages[i] = NULL; |
| 2225 | cc->nr_rpages--; |
| 2226 | } |
| 2227 | |
| 2228 | /* we are done since all pages are beyond EOF */ |
| 2229 | if (f2fs_cluster_is_empty(cc)) |
| 2230 | goto out; |
| 2231 | |
| 2232 | if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei)) |
| 2233 | from_dnode = false; |
| 2234 | |
| 2235 | if (!from_dnode) |
| 2236 | goto skip_reading_dnode; |
| 2237 | |
| 2238 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
| 2239 | ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); |
| 2240 | if (ret) |
| 2241 | goto out; |
| 2242 | |
| 2243 | f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR); |
| 2244 | |
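	/*
	 * Count valid compressed pages. Cluster offset 0 holds the
	 * COMPRESS_ADDR marker, so compressed data blocks start at
	 * offset 1 in the dnode path; in the extent-cache path ei.blk
	 * already points at the first data block, hence the "i - 1".
	 */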
| 2245 | skip_reading_dnode: |
| 2246 | for (i = 1; i < cc->cluster_size; i++) { |
| 2247 | block_t blkaddr; |
| 2248 | |
| 2249 | blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio, |
| 2250 | dn.ofs_in_node + i) : |
| 2251 | ei.blk + i - 1; |
| 2252 | |
| 2253 | if (!__is_valid_data_blkaddr(blkaddr)) |
| 2254 | break; |
| 2255 | |
| 2256 | if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) { |
| 2257 | ret = -EFAULT; |
| 2258 | goto out_put_dnode; |
| 2259 | } |
| 2260 | cc->nr_cpages++; |
| 2261 | |
| 2262 | if (!from_dnode && i >= ei.c_len) |
| 2263 | break; |
| 2264 | } |
| 2265 | |
| 2266 | /* nothing to decompress */ |
| 2267 | if (cc->nr_cpages == 0) { |
| 2268 | ret = 0; |
| 2269 | goto out_put_dnode; |
| 2270 | } |
| 2271 | |
| 2272 | dic = f2fs_alloc_dic(cc); |
| 2273 | if (IS_ERR(dic)) { |
| 2274 | ret = PTR_ERR(dic); |
| 2275 | goto out_put_dnode; |
| 2276 | } |
| 2277 | |
| 2278 | for (i = 0; i < cc->nr_cpages; i++) { |
| 2279 | struct folio *folio = page_folio(dic->cpages[i]); |
| 2280 | block_t blkaddr; |
| 2281 | struct bio_post_read_ctx *ctx; |
| 2282 | |
| 2283 | blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_folio, |
| 2284 | dn.ofs_in_node + i + 1) : |
| 2285 | ei.blk + i; |
| 2286 | |
| 2287 | f2fs_wait_on_block_writeback(inode, blkaddr); |
| 2288 | |
| 2289 | if (f2fs_load_compressed_folio(sbi, folio, blkaddr)) { |
| 2290 | if (atomic_dec_and_test(&dic->remaining_pages)) { |
| 2291 | f2fs_decompress_cluster(dic, true); |
| 2292 | break; |
| 2293 | } |
| 2294 | continue; |
| 2295 | } |
| 2296 | |
| 2297 | if (bio && (!page_is_mergeable(sbi, bio, |
| 2298 | *last_block_in_bio, blkaddr) || |
| 2299 | !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) { |
| 2300 | submit_and_realloc: |
| 2301 | f2fs_submit_read_bio(sbi, bio, DATA); |
| 2302 | bio = NULL; |
| 2303 | } |
| 2304 | |
| 2305 | if (!bio) { |
| 2306 | bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages, |
| 2307 | f2fs_ra_op_flags(rac), |
| 2308 | folio->index, for_write); |
| 2309 | if (IS_ERR(bio)) { |
| 2310 | ret = PTR_ERR(bio); |
| 2311 | f2fs_decompress_end_io(dic, ret, true); |
| 2312 | f2fs_put_dnode(&dn); |
| 2313 | *bio_ret = NULL; |
| 2314 | return ret; |
| 2315 | } |
| 2316 | } |
| 2317 | |
| 2318 | if (!bio_add_folio(bio, folio, blocksize, 0)) |
| 2319 | goto submit_and_realloc; |
| 2320 | |
| 2321 | ctx = get_post_read_ctx(bio); |
| 2322 | ctx->enabled_steps |= STEP_DECOMPRESS; |
| 2323 | refcount_inc(&dic->refcnt); |
| 2324 | |
| 2325 | inc_page_count(sbi, F2FS_RD_DATA); |
| 2326 | f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE); |
| 2327 | *last_block_in_bio = blkaddr; |
| 2328 | } |
| 2329 | |
| 2330 | if (from_dnode) |
| 2331 | f2fs_put_dnode(&dn); |
| 2332 | |
| 2333 | *bio_ret = bio; |
| 2334 | return 0; |
| 2335 | |
| 2336 | out_put_dnode: |
| 2337 | if (from_dnode) |
| 2338 | f2fs_put_dnode(&dn); |
| 2339 | out: |
| 2340 | for (i = 0; i < cc->cluster_size; i++) { |
| 2341 | if (cc->rpages[i]) { |
| 2342 | ClearPageUptodate(cc->rpages[i]); |
| 2343 | unlock_page(cc->rpages[i]); |
| 2344 | } |
| 2345 | } |
| 2346 | *bio_ret = bio; |
| 2347 | return ret; |
| 2348 | } |
| 2349 | #endif |
| 2350 | |
| 2351 | /* |
| 2352 | * This function was originally taken from fs/mpage.c, and customized for f2fs. |
 * The major change is that f2fs uses block_size == page_size by default.
| 2354 | */ |
| 2355 | static int f2fs_mpage_readpages(struct inode *inode, |
| 2356 | struct readahead_control *rac, struct folio *folio) |
| 2357 | { |
| 2358 | struct bio *bio = NULL; |
| 2359 | sector_t last_block_in_bio = 0; |
| 2360 | struct f2fs_map_blocks map; |
| 2361 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2362 | struct compress_ctx cc = { |
| 2363 | .inode = inode, |
| 2364 | .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, |
| 2365 | .cluster_size = F2FS_I(inode)->i_cluster_size, |
| 2366 | .cluster_idx = NULL_CLUSTER, |
| 2367 | .rpages = NULL, |
| 2368 | .cpages = NULL, |
| 2369 | .nr_rpages = 0, |
| 2370 | .nr_cpages = 0, |
| 2371 | }; |
| 2372 | pgoff_t nc_cluster_idx = NULL_CLUSTER; |
| 2373 | pgoff_t index; |
| 2374 | #endif |
| 2375 | unsigned nr_pages = rac ? readahead_count(rac) : 1; |
| 2376 | unsigned max_nr_pages = nr_pages; |
| 2377 | int ret = 0; |
| 2378 | |
| 2379 | map.m_pblk = 0; |
| 2380 | map.m_lblk = 0; |
| 2381 | map.m_len = 0; |
| 2382 | map.m_flags = 0; |
| 2383 | map.m_next_pgofs = NULL; |
| 2384 | map.m_next_extent = NULL; |
| 2385 | map.m_seg_type = NO_CHECK_TYPE; |
| 2386 | map.m_may_create = false; |
| 2387 | |
| 2388 | for (; nr_pages; nr_pages--) { |
| 2389 | if (rac) { |
| 2390 | folio = readahead_folio(rac); |
| 2391 | prefetchw(&folio->flags); |
| 2392 | } |
| 2393 | |
| 2394 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2395 | index = folio->index; |
| 2396 | |
| 2397 | if (!f2fs_compressed_file(inode)) |
| 2398 | goto read_single_page; |
| 2399 | |
		/* there are remaining compressed pages, submit them */
| 2401 | if (!f2fs_cluster_can_merge_page(&cc, index)) { |
| 2402 | ret = f2fs_read_multi_pages(&cc, &bio, |
| 2403 | max_nr_pages, |
| 2404 | &last_block_in_bio, |
| 2405 | rac, false); |
| 2406 | f2fs_destroy_compress_ctx(&cc, false); |
| 2407 | if (ret) |
| 2408 | goto set_error_page; |
| 2409 | } |
| 2410 | if (cc.cluster_idx == NULL_CLUSTER) { |
| 2411 | if (nc_cluster_idx == index >> cc.log_cluster_size) |
| 2412 | goto read_single_page; |
| 2413 | |
| 2414 | ret = f2fs_is_compressed_cluster(inode, index); |
| 2415 | if (ret < 0) |
| 2416 | goto set_error_page; |
| 2417 | else if (!ret) { |
| 2418 | nc_cluster_idx = |
| 2419 | index >> cc.log_cluster_size; |
| 2420 | goto read_single_page; |
| 2421 | } |
| 2422 | |
| 2423 | nc_cluster_idx = NULL_CLUSTER; |
| 2424 | } |
| 2425 | ret = f2fs_init_compress_ctx(&cc); |
| 2426 | if (ret) |
| 2427 | goto set_error_page; |
| 2428 | |
| 2429 | f2fs_compress_ctx_add_page(&cc, folio); |
| 2430 | |
| 2431 | goto next_page; |
| 2432 | read_single_page: |
| 2433 | #endif |
| 2434 | |
| 2435 | ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map, |
| 2436 | &bio, &last_block_in_bio, rac); |
| 2437 | if (ret) { |
| 2438 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2439 | set_error_page: |
| 2440 | #endif |
| 2441 | folio_zero_segment(folio, 0, folio_size(folio)); |
| 2442 | folio_unlock(folio); |
| 2443 | } |
| 2444 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2445 | next_page: |
| 2446 | #endif |
| 2447 | |
| 2448 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2449 | if (f2fs_compressed_file(inode)) { |
| 2450 | /* last page */ |
| 2451 | if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) { |
| 2452 | ret = f2fs_read_multi_pages(&cc, &bio, |
| 2453 | max_nr_pages, |
| 2454 | &last_block_in_bio, |
| 2455 | rac, false); |
| 2456 | f2fs_destroy_compress_ctx(&cc, false); |
| 2457 | } |
| 2458 | } |
| 2459 | #endif |
| 2460 | } |
| 2461 | if (bio) |
| 2462 | f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA); |
| 2463 | return ret; |
| 2464 | } |
| 2465 | |
| 2466 | static int f2fs_read_data_folio(struct file *file, struct folio *folio) |
| 2467 | { |
| 2468 | struct inode *inode = folio->mapping->host; |
| 2469 | int ret = -EAGAIN; |
| 2470 | |
| 2471 | trace_f2fs_readpage(folio, DATA); |
| 2472 | |
| 2473 | if (!f2fs_is_compress_backend_ready(inode)) { |
| 2474 | folio_unlock(folio); |
| 2475 | return -EOPNOTSUPP; |
| 2476 | } |
| 2477 | |
| 2478 | /* If the file has inline data, try to read it directly */ |
| 2479 | if (f2fs_has_inline_data(inode)) |
| 2480 | ret = f2fs_read_inline_data(inode, folio); |
| 2481 | if (ret == -EAGAIN) |
| 2482 | ret = f2fs_mpage_readpages(inode, NULL, folio); |
| 2483 | return ret; |
| 2484 | } |
| 2485 | |
| 2486 | static void f2fs_readahead(struct readahead_control *rac) |
| 2487 | { |
| 2488 | struct inode *inode = rac->mapping->host; |
| 2489 | |
| 2490 | trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac)); |
| 2491 | |
| 2492 | if (!f2fs_is_compress_backend_ready(inode)) |
| 2493 | return; |
| 2494 | |
| 2495 | /* If the file has inline data, skip readahead */ |
| 2496 | if (f2fs_has_inline_data(inode)) |
| 2497 | return; |
| 2498 | |
| 2499 | f2fs_mpage_readpages(inode, rac, NULL); |
| 2500 | } |
| 2501 | |
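/*
 * Encrypt one data page with fs-layer crypto before writeback.
 * Inline-crypto inodes are skipped, ENOMEM retries with __GFP_NOFAIL
 * after flushing merged writes, and any up-to-date copy of the old
 * block cached in META_MAPPING is refreshed with the encrypted data.
 */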
| 2502 | int f2fs_encrypt_one_page(struct f2fs_io_info *fio) |
| 2503 | { |
| 2504 | struct inode *inode = fio_inode(fio); |
| 2505 | struct folio *mfolio; |
| 2506 | struct page *page; |
| 2507 | gfp_t gfp_flags = GFP_NOFS; |
| 2508 | |
| 2509 | if (!f2fs_encrypted_file(inode)) |
| 2510 | return 0; |
| 2511 | |
| 2512 | page = fio->compressed_page ? fio->compressed_page : fio->page; |
| 2513 | |
| 2514 | if (fscrypt_inode_uses_inline_crypto(inode)) |
| 2515 | return 0; |
| 2516 | |
| 2517 | retry_encrypt: |
| 2518 | fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page_folio(page), |
| 2519 | PAGE_SIZE, 0, gfp_flags); |
| 2520 | if (IS_ERR(fio->encrypted_page)) { |
| 2521 | /* flush pending IOs and wait for a while in the ENOMEM case */ |
| 2522 | if (PTR_ERR(fio->encrypted_page) == -ENOMEM) { |
| 2523 | f2fs_flush_merged_writes(fio->sbi); |
| 2524 | memalloc_retry_wait(GFP_NOFS); |
| 2525 | gfp_flags |= __GFP_NOFAIL; |
| 2526 | goto retry_encrypt; |
| 2527 | } |
| 2528 | return PTR_ERR(fio->encrypted_page); |
| 2529 | } |
| 2530 | |
| 2531 | mfolio = filemap_lock_folio(META_MAPPING(fio->sbi), fio->old_blkaddr); |
| 2532 | if (!IS_ERR(mfolio)) { |
| 2533 | if (folio_test_uptodate(mfolio)) |
| 2534 | memcpy(folio_address(mfolio), |
| 2535 | page_address(fio->encrypted_page), PAGE_SIZE); |
| 2536 | f2fs_folio_put(mfolio, true); |
| 2537 | } |
| 2538 | return 0; |
| 2539 | } |
| 2540 | |
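/*
 * In-place-update (IPU) policy check, in rough precedence order: honor
 * an explicit OPU request first, then the force/SSR/utilization
 * policies, then the async-write and fsync heuristics, and finally the
 * checkpoint-disabled case where unckeckpointed data is rewritten in
 * place.
 */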
| 2541 | static inline bool check_inplace_update_policy(struct inode *inode, |
| 2542 | struct f2fs_io_info *fio) |
| 2543 | { |
| 2544 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 2545 | |
| 2546 | if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) && |
| 2547 | is_inode_flag_set(inode, FI_OPU_WRITE)) |
| 2548 | return false; |
| 2549 | if (IS_F2FS_IPU_FORCE(sbi)) |
| 2550 | return true; |
| 2551 | if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi)) |
| 2552 | return true; |
| 2553 | if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util) |
| 2554 | return true; |
| 2555 | if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) && |
| 2556 | utilization(sbi) > SM_I(sbi)->min_ipu_util) |
| 2557 | return true; |
| 2558 | |
| 2559 | /* |
	 * use IPU to rewrite async pages
| 2561 | */ |
| 2562 | if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE && |
| 2563 | !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode)) |
| 2564 | return true; |
| 2565 | |
| 2566 | /* this is only set during fdatasync */ |
| 2567 | if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU)) |
| 2568 | return true; |
| 2569 | |
| 2570 | if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) && |
| 2571 | !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) |
| 2572 | return true; |
| 2573 | |
| 2574 | return false; |
| 2575 | } |
| 2576 | |
| 2577 | bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio) |
| 2578 | { |
| 2579 | /* swap file is migrating in aligned write mode */ |
| 2580 | if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) |
| 2581 | return false; |
| 2582 | |
| 2583 | if (f2fs_is_pinned_file(inode)) |
| 2584 | return true; |
| 2585 | |
	/* if this is a cold file, we should overwrite to avoid fragmentation */
| 2587 | if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE)) |
| 2588 | return true; |
| 2589 | |
| 2590 | return check_inplace_update_policy(inode, fio); |
| 2591 | } |
| 2592 | |
| 2593 | bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio) |
| 2594 | { |
| 2595 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 2596 | |
| 2597 | /* The below cases were checked when setting it. */ |
| 2598 | if (f2fs_is_pinned_file(inode)) |
| 2599 | return false; |
| 2600 | if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK)) |
| 2601 | return true; |
| 2602 | if (f2fs_lfs_mode(sbi)) |
| 2603 | return true; |
| 2604 | if (S_ISDIR(inode->i_mode)) |
| 2605 | return true; |
| 2606 | if (IS_NOQUOTA(inode)) |
| 2607 | return true; |
| 2608 | if (f2fs_used_in_atomic_write(inode)) |
| 2609 | return true; |
	/* rewrite low-ratio compressed data w/ OPU mode to avoid fragmentation */
| 2611 | if (f2fs_compressed_file(inode) && |
| 2612 | F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER && |
| 2613 | is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) |
| 2614 | return true; |
| 2615 | |
| 2616 | /* swap file is migrating in aligned write mode */ |
| 2617 | if (is_inode_flag_set(inode, FI_ALIGNED_WRITE)) |
| 2618 | return true; |
| 2619 | |
| 2620 | if (is_inode_flag_set(inode, FI_OPU_WRITE)) |
| 2621 | return true; |
| 2622 | |
| 2623 | if (fio) { |
| 2624 | if (page_private_gcing(fio->page)) |
| 2625 | return true; |
| 2626 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && |
| 2627 | f2fs_is_checkpointed_data(sbi, fio->old_blkaddr))) |
| 2628 | return true; |
| 2629 | } |
| 2630 | return false; |
| 2631 | } |
| 2632 | |
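/* out-of-place conditions take precedence over the in-place policy */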
| 2633 | static inline bool need_inplace_update(struct f2fs_io_info *fio) |
| 2634 | { |
| 2635 | struct inode *inode = fio_inode(fio); |
| 2636 | |
| 2637 | if (f2fs_should_update_outplace(inode, fio)) |
| 2638 | return false; |
| 2639 | |
| 2640 | return f2fs_should_update_inplace(inode, fio); |
| 2641 | } |
| 2642 | |
| 2643 | int f2fs_do_write_data_page(struct f2fs_io_info *fio) |
| 2644 | { |
| 2645 | struct folio *folio = page_folio(fio->page); |
| 2646 | struct inode *inode = folio->mapping->host; |
| 2647 | struct dnode_of_data dn; |
| 2648 | struct node_info ni; |
| 2649 | bool ipu_force = false; |
| 2650 | bool atomic_commit; |
| 2651 | int err = 0; |
| 2652 | |
| 2653 | /* Use COW inode to make dnode_of_data for atomic write */ |
| 2654 | atomic_commit = f2fs_is_atomic_file(inode) && |
| 2655 | page_private_atomic(folio_page(folio, 0)); |
| 2656 | if (atomic_commit) |
| 2657 | set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0); |
| 2658 | else |
| 2659 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
| 2660 | |
| 2661 | if (need_inplace_update(fio) && |
| 2662 | f2fs_lookup_read_extent_cache_block(inode, folio->index, |
| 2663 | &fio->old_blkaddr)) { |
| 2664 | if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, |
| 2665 | DATA_GENERIC_ENHANCE)) |
| 2666 | return -EFSCORRUPTED; |
| 2667 | |
| 2668 | ipu_force = true; |
| 2669 | fio->need_lock = LOCK_DONE; |
| 2670 | goto got_it; |
| 2671 | } |
| 2672 | |
	/* avoid deadlock between page->lock and f2fs_lock_op */
| 2674 | if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi)) |
| 2675 | return -EAGAIN; |
| 2676 | |
| 2677 | err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE); |
| 2678 | if (err) |
| 2679 | goto out; |
| 2680 | |
| 2681 | fio->old_blkaddr = dn.data_blkaddr; |
| 2682 | |
| 2683 | /* This page is already truncated */ |
| 2684 | if (fio->old_blkaddr == NULL_ADDR) { |
| 2685 | folio_clear_uptodate(folio); |
| 2686 | clear_page_private_gcing(folio_page(folio, 0)); |
| 2687 | goto out_writepage; |
| 2688 | } |
| 2689 | got_it: |
| 2690 | if (__is_valid_data_blkaddr(fio->old_blkaddr) && |
| 2691 | !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, |
| 2692 | DATA_GENERIC_ENHANCE)) { |
| 2693 | err = -EFSCORRUPTED; |
| 2694 | goto out_writepage; |
| 2695 | } |
| 2696 | |
| 2697 | /* wait for GCed page writeback via META_MAPPING */ |
| 2698 | if (fio->meta_gc) |
| 2699 | f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); |
| 2700 | |
| 2701 | /* |
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
| 2704 | */ |
| 2705 | if (ipu_force || |
| 2706 | (__is_valid_data_blkaddr(fio->old_blkaddr) && |
| 2707 | need_inplace_update(fio))) { |
| 2708 | err = f2fs_encrypt_one_page(fio); |
| 2709 | if (err) |
| 2710 | goto out_writepage; |
| 2711 | |
| 2712 | folio_start_writeback(folio); |
| 2713 | f2fs_put_dnode(&dn); |
| 2714 | if (fio->need_lock == LOCK_REQ) |
| 2715 | f2fs_unlock_op(fio->sbi); |
| 2716 | err = f2fs_inplace_write_data(fio); |
| 2717 | if (err) { |
| 2718 | if (fscrypt_inode_uses_fs_layer_crypto(inode)) |
| 2719 | fscrypt_finalize_bounce_page(&fio->encrypted_page); |
| 2720 | folio_end_writeback(folio); |
| 2721 | } else { |
| 2722 | set_inode_flag(inode, FI_UPDATE_WRITE); |
| 2723 | } |
| 2724 | trace_f2fs_do_write_data_page(folio, IPU); |
| 2725 | return err; |
| 2726 | } |
| 2727 | |
| 2728 | if (fio->need_lock == LOCK_RETRY) { |
| 2729 | if (!f2fs_trylock_op(fio->sbi)) { |
| 2730 | err = -EAGAIN; |
| 2731 | goto out_writepage; |
| 2732 | } |
| 2733 | fio->need_lock = LOCK_REQ; |
| 2734 | } |
| 2735 | |
| 2736 | err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false); |
| 2737 | if (err) |
| 2738 | goto out_writepage; |
| 2739 | |
| 2740 | fio->version = ni.version; |
| 2741 | |
| 2742 | err = f2fs_encrypt_one_page(fio); |
| 2743 | if (err) |
| 2744 | goto out_writepage; |
| 2745 | |
| 2746 | folio_start_writeback(folio); |
| 2747 | |
| 2748 | if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR) |
| 2749 | f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false); |
| 2750 | |
| 2751 | /* LFS mode write path */ |
| 2752 | f2fs_outplace_write_data(&dn, fio); |
| 2753 | trace_f2fs_do_write_data_page(folio, OPU); |
| 2754 | set_inode_flag(inode, FI_APPEND_WRITE); |
| 2755 | if (atomic_commit) |
| 2756 | clear_page_private_atomic(folio_page(folio, 0)); |
| 2757 | out_writepage: |
| 2758 | f2fs_put_dnode(&dn); |
| 2759 | out: |
| 2760 | if (fio->need_lock == LOCK_REQ) |
| 2761 | f2fs_unlock_op(fio->sbi); |
| 2762 | return err; |
| 2763 | } |
| 2764 | |
| 2765 | int f2fs_write_single_data_page(struct folio *folio, int *submitted, |
| 2766 | struct bio **bio, |
| 2767 | sector_t *last_block, |
| 2768 | struct writeback_control *wbc, |
| 2769 | enum iostat_type io_type, |
| 2770 | int compr_blocks, |
| 2771 | bool allow_balance) |
| 2772 | { |
| 2773 | struct inode *inode = folio->mapping->host; |
| 2774 | struct page *page = folio_page(folio, 0); |
| 2775 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 2776 | loff_t i_size = i_size_read(inode); |
| 2777 | const pgoff_t end_index = ((unsigned long long)i_size) |
| 2778 | >> PAGE_SHIFT; |
| 2779 | loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT; |
| 2780 | unsigned offset = 0; |
| 2781 | bool need_balance_fs = false; |
| 2782 | bool quota_inode = IS_NOQUOTA(inode); |
| 2783 | int err = 0; |
| 2784 | struct f2fs_io_info fio = { |
| 2785 | .sbi = sbi, |
| 2786 | .ino = inode->i_ino, |
| 2787 | .type = DATA, |
| 2788 | .op = REQ_OP_WRITE, |
| 2789 | .op_flags = wbc_to_write_flags(wbc), |
| 2790 | .old_blkaddr = NULL_ADDR, |
| 2791 | .page = page, |
| 2792 | .encrypted_page = NULL, |
| 2793 | .submitted = 0, |
| 2794 | .compr_blocks = compr_blocks, |
| 2795 | .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY, |
| 2796 | .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0, |
| 2797 | .io_type = io_type, |
| 2798 | .io_wbc = wbc, |
| 2799 | .bio = bio, |
| 2800 | .last_block = last_block, |
| 2801 | }; |
| 2802 | |
| 2803 | trace_f2fs_writepage(folio, DATA); |
| 2804 | |
	/* we should bypass data pages to let the kworker jobs proceed */
| 2806 | if (unlikely(f2fs_cp_error(sbi))) { |
| 2807 | mapping_set_error(folio->mapping, -EIO); |
| 2808 | /* |
		 * don't drop any dirty dentry pages, to keep the latest
		 * directory structure.
| 2811 | */ |
| 2812 | if (S_ISDIR(inode->i_mode) && |
| 2813 | !is_sbi_flag_set(sbi, SBI_IS_CLOSE)) |
| 2814 | goto redirty_out; |
| 2815 | |
| 2816 | /* keep data pages in remount-ro mode */ |
| 2817 | if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) |
| 2818 | goto redirty_out; |
| 2819 | goto out; |
| 2820 | } |
| 2821 | |
| 2822 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
| 2823 | goto redirty_out; |
| 2824 | |
| 2825 | if (folio->index < end_index || |
| 2826 | f2fs_verity_in_progress(inode) || |
| 2827 | compr_blocks) |
| 2828 | goto write; |
| 2829 | |
| 2830 | /* |
	 * If the offset is out of range of the file size,
| 2832 | * this page does not have to be written to disk. |
| 2833 | */ |
| 2834 | offset = i_size & (PAGE_SIZE - 1); |
| 2835 | if ((folio->index >= end_index + 1) || !offset) |
| 2836 | goto out; |
| 2837 | |
| 2838 | folio_zero_segment(folio, offset, folio_size(folio)); |
| 2839 | write: |
| 2840 | /* Dentry/quota blocks are controlled by checkpoint */ |
| 2841 | if (S_ISDIR(inode->i_mode) || quota_inode) { |
| 2842 | /* |
| 2843 | * We need to wait for node_write to avoid block allocation during |
		 * checkpoint. This can only happen for quota writes, which can
		 * otherwise cause the discard race condition below.
| 2846 | */ |
| 2847 | if (quota_inode) |
| 2848 | f2fs_down_read(&sbi->node_write); |
| 2849 | |
| 2850 | fio.need_lock = LOCK_DONE; |
| 2851 | err = f2fs_do_write_data_page(&fio); |
| 2852 | |
| 2853 | if (quota_inode) |
| 2854 | f2fs_up_read(&sbi->node_write); |
| 2855 | |
| 2856 | goto done; |
| 2857 | } |
| 2858 | |
| 2859 | need_balance_fs = true; |
| 2860 | err = -EAGAIN; |
| 2861 | if (f2fs_has_inline_data(inode)) { |
| 2862 | err = f2fs_write_inline_data(inode, folio); |
| 2863 | if (!err) |
| 2864 | goto out; |
| 2865 | } |
| 2866 | |
| 2867 | if (err == -EAGAIN) { |
| 2868 | err = f2fs_do_write_data_page(&fio); |
| 2869 | if (err == -EAGAIN) { |
| 2870 | f2fs_bug_on(sbi, compr_blocks); |
| 2871 | fio.need_lock = LOCK_REQ; |
| 2872 | err = f2fs_do_write_data_page(&fio); |
| 2873 | } |
| 2874 | } |
| 2875 | |
| 2876 | if (err) { |
| 2877 | file_set_keep_isize(inode); |
| 2878 | } else { |
| 2879 | spin_lock(&F2FS_I(inode)->i_size_lock); |
| 2880 | if (F2FS_I(inode)->last_disk_size < psize) |
| 2881 | F2FS_I(inode)->last_disk_size = psize; |
| 2882 | spin_unlock(&F2FS_I(inode)->i_size_lock); |
| 2883 | } |
| 2884 | |
| 2885 | done: |
| 2886 | if (err && err != -ENOENT) |
| 2887 | goto redirty_out; |
| 2888 | |
| 2889 | out: |
| 2890 | inode_dec_dirty_pages(inode); |
| 2891 | if (err) { |
| 2892 | folio_clear_uptodate(folio); |
| 2893 | clear_page_private_gcing(page); |
| 2894 | } |
| 2895 | folio_unlock(folio); |
| 2896 | if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && |
| 2897 | !F2FS_I(inode)->wb_task && allow_balance) |
| 2898 | f2fs_balance_fs(sbi, need_balance_fs); |
| 2899 | |
| 2900 | if (unlikely(f2fs_cp_error(sbi))) { |
| 2901 | f2fs_submit_merged_write(sbi, DATA); |
| 2902 | if (bio && *bio) |
| 2903 | f2fs_submit_merged_ipu_write(sbi, bio, NULL); |
| 2904 | submitted = NULL; |
| 2905 | } |
| 2906 | |
| 2907 | if (submitted) |
| 2908 | *submitted = fio.submitted; |
| 2909 | |
| 2910 | return 0; |
| 2911 | |
| 2912 | redirty_out: |
| 2913 | folio_redirty_for_writepage(wbc, folio); |
| 2914 | /* |
| 2915 | * pageout() in MM translates EAGAIN, so calls handle_write_error() |
| 2916 | * -> mapping_set_error() -> set_bit(AS_EIO, ...). |
 * file_write_and_wait_range() will see the EIO error, which is critical
 * for fsync() to return the atomic_write failure to the user.
| 2919 | */ |
| 2920 | folio_unlock(folio); |
| 2921 | if (!err) |
| 2922 | return 1; |
| 2923 | return err; |
| 2924 | } |
| 2925 | |
| 2926 | /* |
| 2927 | * This function was copied from write_cache_pages from mm/page-writeback.c. |
 * The major change is that the write step for cold data pages is made
 * separate from that for warm/hot data pages.
| 2930 | */ |
| 2931 | static int f2fs_write_cache_pages(struct address_space *mapping, |
| 2932 | struct writeback_control *wbc, |
| 2933 | enum iostat_type io_type) |
| 2934 | { |
| 2935 | int ret = 0; |
| 2936 | int done = 0, retry = 0; |
| 2937 | struct page *pages_local[F2FS_ONSTACK_PAGES]; |
| 2938 | struct page **pages = pages_local; |
| 2939 | struct folio_batch fbatch; |
| 2940 | struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); |
| 2941 | struct bio *bio = NULL; |
| 2942 | sector_t last_block; |
| 2943 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2944 | struct inode *inode = mapping->host; |
| 2945 | struct compress_ctx cc = { |
| 2946 | .inode = inode, |
| 2947 | .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, |
| 2948 | .cluster_size = F2FS_I(inode)->i_cluster_size, |
| 2949 | .cluster_idx = NULL_CLUSTER, |
| 2950 | .rpages = NULL, |
| 2951 | .nr_rpages = 0, |
| 2952 | .cpages = NULL, |
| 2953 | .valid_nr_cpages = 0, |
| 2954 | .rbuf = NULL, |
| 2955 | .cbuf = NULL, |
| 2956 | .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size, |
| 2957 | .private = NULL, |
| 2958 | }; |
| 2959 | #endif |
| 2960 | int nr_folios, p, idx; |
| 2961 | int nr_pages; |
| 2962 | unsigned int max_pages = F2FS_ONSTACK_PAGES; |
| 2963 | pgoff_t index; |
| 2964 | pgoff_t end; /* Inclusive */ |
| 2965 | pgoff_t done_index; |
| 2966 | int range_whole = 0; |
| 2967 | xa_mark_t tag; |
| 2968 | int nwritten = 0; |
| 2969 | int submitted = 0; |
| 2970 | int i; |
| 2971 | |
| 2972 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 2973 | if (f2fs_compressed_file(inode) && |
| 2974 | 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) { |
| 2975 | pages = f2fs_kzalloc(sbi, sizeof(struct page *) << |
| 2976 | cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL); |
| 2977 | max_pages = 1 << cc.log_cluster_size; |
| 2978 | } |
| 2979 | #endif |
| 2980 | |
| 2981 | folio_batch_init(&fbatch); |
| 2982 | |
| 2983 | if (get_dirty_pages(mapping->host) <= |
| 2984 | SM_I(F2FS_M_SB(mapping))->min_hot_blocks) |
| 2985 | set_inode_flag(mapping->host, FI_HOT_DATA); |
| 2986 | else |
| 2987 | clear_inode_flag(mapping->host, FI_HOT_DATA); |
| 2988 | |
| 2989 | if (wbc->range_cyclic) { |
| 2990 | index = mapping->writeback_index; /* prev offset */ |
| 2991 | end = -1; |
| 2992 | } else { |
| 2993 | index = wbc->range_start >> PAGE_SHIFT; |
| 2994 | end = wbc->range_end >> PAGE_SHIFT; |
| 2995 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) |
| 2996 | range_whole = 1; |
| 2997 | } |
| 2998 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
| 2999 | tag = PAGECACHE_TAG_TOWRITE; |
| 3000 | else |
| 3001 | tag = PAGECACHE_TAG_DIRTY; |
| 3002 | retry: |
| 3003 | retry = 0; |
| 3004 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
| 3005 | tag_pages_for_writeback(mapping, index, end); |
| 3006 | done_index = index; |
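	/*
	 * Gather phase: pull dirty/towrite folios from the page cache and
	 * flatten them into pages[] (up to max_pages entries, taking a
	 * reference per page) before the write phase below consumes them.
	 */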
| 3007 | while (!done && !retry && (index <= end)) { |
| 3008 | nr_pages = 0; |
| 3009 | again: |
| 3010 | nr_folios = filemap_get_folios_tag(mapping, &index, end, |
| 3011 | tag, &fbatch); |
| 3012 | if (nr_folios == 0) { |
| 3013 | if (nr_pages) |
| 3014 | goto write; |
| 3015 | break; |
| 3016 | } |
| 3017 | |
| 3018 | for (i = 0; i < nr_folios; i++) { |
| 3019 | struct folio *folio = fbatch.folios[i]; |
| 3020 | |
| 3021 | idx = 0; |
| 3022 | p = folio_nr_pages(folio); |
| 3023 | add_more: |
| 3024 | pages[nr_pages] = folio_page(folio, idx); |
| 3025 | folio_get(folio); |
| 3026 | if (++nr_pages == max_pages) { |
| 3027 | index = folio->index + idx + 1; |
| 3028 | folio_batch_release(&fbatch); |
| 3029 | goto write; |
| 3030 | } |
| 3031 | if (++idx < p) |
| 3032 | goto add_more; |
| 3033 | } |
| 3034 | folio_batch_release(&fbatch); |
| 3035 | goto again; |
| 3036 | write: |
| 3037 | for (i = 0; i < nr_pages; i++) { |
| 3038 | struct page *page = pages[i]; |
| 3039 | struct folio *folio = page_folio(page); |
| 3040 | bool need_readd; |
| 3041 | readd: |
| 3042 | need_readd = false; |
| 3043 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3044 | if (f2fs_compressed_file(inode)) { |
| 3045 | void *fsdata = NULL; |
| 3046 | struct page *pagep; |
| 3047 | int ret2; |
| 3048 | |
| 3049 | ret = f2fs_init_compress_ctx(&cc); |
| 3050 | if (ret) { |
| 3051 | done = 1; |
| 3052 | break; |
| 3053 | } |
| 3054 | |
| 3055 | if (!f2fs_cluster_can_merge_page(&cc, |
| 3056 | folio->index)) { |
| 3057 | ret = f2fs_write_multi_pages(&cc, |
| 3058 | &submitted, wbc, io_type); |
| 3059 | if (!ret) |
| 3060 | need_readd = true; |
| 3061 | goto result; |
| 3062 | } |
| 3063 | |
| 3064 | if (unlikely(f2fs_cp_error(sbi))) |
| 3065 | goto lock_folio; |
| 3066 | |
| 3067 | if (!f2fs_cluster_is_empty(&cc)) |
| 3068 | goto lock_folio; |
| 3069 | |
| 3070 | if (f2fs_all_cluster_page_ready(&cc, |
| 3071 | pages, i, nr_pages, true)) |
| 3072 | goto lock_folio; |
| 3073 | |
| 3074 | ret2 = f2fs_prepare_compress_overwrite( |
| 3075 | inode, &pagep, |
| 3076 | folio->index, &fsdata); |
| 3077 | if (ret2 < 0) { |
| 3078 | ret = ret2; |
| 3079 | done = 1; |
| 3080 | break; |
| 3081 | } else if (ret2 && |
| 3082 | (!f2fs_compress_write_end(inode, |
| 3083 | fsdata, folio->index, 1) || |
| 3084 | !f2fs_all_cluster_page_ready(&cc, |
| 3085 | pages, i, nr_pages, |
| 3086 | false))) { |
| 3087 | retry = 1; |
| 3088 | break; |
| 3089 | } |
| 3090 | } |
| 3091 | #endif |
			/* give priority to WB_SYNC threads */
| 3093 | if (atomic_read(&sbi->wb_sync_req[DATA]) && |
| 3094 | wbc->sync_mode == WB_SYNC_NONE) { |
| 3095 | done = 1; |
| 3096 | break; |
| 3097 | } |
| 3098 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3099 | lock_folio: |
| 3100 | #endif |
| 3101 | done_index = folio->index; |
| 3102 | retry_write: |
| 3103 | folio_lock(folio); |
| 3104 | |
| 3105 | if (unlikely(folio->mapping != mapping)) { |
| 3106 | continue_unlock: |
| 3107 | folio_unlock(folio); |
| 3108 | continue; |
| 3109 | } |
| 3110 | |
| 3111 | if (!folio_test_dirty(folio)) { |
| 3112 | /* someone wrote it for us */ |
| 3113 | goto continue_unlock; |
| 3114 | } |
| 3115 | |
| 3116 | if (folio_test_writeback(folio)) { |
| 3117 | if (wbc->sync_mode == WB_SYNC_NONE) |
| 3118 | goto continue_unlock; |
| 3119 | f2fs_folio_wait_writeback(folio, DATA, true, true); |
| 3120 | } |
| 3121 | |
| 3122 | if (!folio_clear_dirty_for_io(folio)) |
| 3123 | goto continue_unlock; |
| 3124 | |
| 3125 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3126 | if (f2fs_compressed_file(inode)) { |
| 3127 | folio_get(folio); |
| 3128 | f2fs_compress_ctx_add_page(&cc, folio); |
| 3129 | continue; |
| 3130 | } |
| 3131 | #endif |
| 3132 | submitted = 0; |
| 3133 | ret = f2fs_write_single_data_page(folio, |
| 3134 | &submitted, &bio, &last_block, |
| 3135 | wbc, io_type, 0, true); |
| 3136 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3137 | result: |
| 3138 | #endif |
| 3139 | nwritten += submitted; |
| 3140 | wbc->nr_to_write -= submitted; |
| 3141 | |
| 3142 | if (unlikely(ret)) { |
| 3143 | /* |
				 * keep nr_to_write, since vfs uses this to
				 * get the number of written pages.
| 3146 | */ |
| 3147 | if (ret == 1) { |
| 3148 | ret = 0; |
| 3149 | goto next; |
| 3150 | } else if (ret == -EAGAIN) { |
| 3151 | ret = 0; |
| 3152 | if (wbc->sync_mode == WB_SYNC_ALL) { |
| 3153 | f2fs_io_schedule_timeout( |
| 3154 | DEFAULT_IO_TIMEOUT); |
| 3155 | goto retry_write; |
| 3156 | } |
| 3157 | goto next; |
| 3158 | } |
| 3159 | done_index = folio_next_index(folio); |
| 3160 | done = 1; |
| 3161 | break; |
| 3162 | } |
| 3163 | |
| 3164 | if (wbc->nr_to_write <= 0 && |
| 3165 | wbc->sync_mode == WB_SYNC_NONE) { |
| 3166 | done = 1; |
| 3167 | break; |
| 3168 | } |
| 3169 | next: |
| 3170 | if (need_readd) |
| 3171 | goto readd; |
| 3172 | } |
| 3173 | release_pages(pages, nr_pages); |
| 3174 | cond_resched(); |
| 3175 | } |
| 3176 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
	/* flush remaining pages in the compress cluster */
| 3178 | if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) { |
| 3179 | ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type); |
| 3180 | nwritten += submitted; |
| 3181 | wbc->nr_to_write -= submitted; |
| 3182 | if (ret) { |
| 3183 | done = 1; |
| 3184 | retry = 0; |
| 3185 | } |
| 3186 | } |
| 3187 | if (f2fs_compressed_file(inode)) |
| 3188 | f2fs_destroy_compress_ctx(&cc, false); |
| 3189 | #endif |
| 3190 | if (retry) { |
| 3191 | index = 0; |
| 3192 | end = -1; |
| 3193 | goto retry; |
| 3194 | } |
| 3195 | if (wbc->range_cyclic && !done) |
| 3196 | done_index = 0; |
| 3197 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) |
| 3198 | mapping->writeback_index = done_index; |
| 3199 | |
| 3200 | if (nwritten) |
| 3201 | f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, |
| 3202 | NULL, 0, DATA); |
| 3203 | /* submit cached bio of IPU write */ |
| 3204 | if (bio) |
| 3205 | f2fs_submit_merged_ipu_write(sbi, &bio, NULL); |
| 3206 | |
| 3207 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3208 | if (pages != pages_local) |
| 3209 | kfree(pages); |
| 3210 | #endif |
| 3211 | |
| 3212 | return ret; |
| 3213 | } |
| 3214 | |
| 3215 | static inline bool __should_serialize_io(struct inode *inode, |
| 3216 | struct writeback_control *wbc) |
| 3217 | { |
	/* to avoid deadlock in the data flush path */
| 3219 | if (F2FS_I(inode)->wb_task) |
| 3220 | return false; |
| 3221 | |
| 3222 | if (!S_ISREG(inode->i_mode)) |
| 3223 | return false; |
| 3224 | if (IS_NOQUOTA(inode)) |
| 3225 | return false; |
| 3226 | |
| 3227 | if (f2fs_need_compress_data(inode)) |
| 3228 | return true; |
| 3229 | if (wbc->sync_mode != WB_SYNC_ALL) |
| 3230 | return true; |
| 3231 | if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) |
| 3232 | return true; |
| 3233 | return false; |
| 3234 | } |
| 3235 | |
| 3236 | static int __f2fs_write_data_pages(struct address_space *mapping, |
| 3237 | struct writeback_control *wbc, |
| 3238 | enum iostat_type io_type) |
| 3239 | { |
| 3240 | struct inode *inode = mapping->host; |
| 3241 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 3242 | struct blk_plug plug; |
| 3243 | int ret; |
| 3244 | bool locked = false; |
| 3245 | |
| 3246 | /* skip writing if there is no dirty page in this inode */ |
| 3247 | if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE) |
| 3248 | return 0; |
| 3249 | |
| 3250 | /* during POR, we don't need to trigger writepage at all. */ |
| 3251 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
| 3252 | goto skip_write; |
| 3253 | |
| 3254 | if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) && |
| 3255 | wbc->sync_mode == WB_SYNC_NONE && |
| 3256 | get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && |
| 3257 | f2fs_available_free_memory(sbi, DIRTY_DENTS)) |
| 3258 | goto skip_write; |
| 3259 | |
	/* skip writing during the file defragment preparation stage */
| 3261 | if (is_inode_flag_set(inode, FI_SKIP_WRITES)) |
| 3262 | goto skip_write; |
| 3263 | |
| 3264 | trace_f2fs_writepages(mapping->host, wbc, DATA); |
| 3265 | |
	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
| 3267 | if (wbc->sync_mode == WB_SYNC_ALL) |
| 3268 | atomic_inc(&sbi->wb_sync_req[DATA]); |
| 3269 | else if (atomic_read(&sbi->wb_sync_req[DATA])) { |
| 3270 | /* to avoid potential deadlock */ |
| 3271 | if (current->plug) |
| 3272 | blk_finish_plug(current->plug); |
| 3273 | goto skip_write; |
| 3274 | } |
| 3275 | |
| 3276 | if (__should_serialize_io(inode, wbc)) { |
| 3277 | mutex_lock(&sbi->writepages); |
| 3278 | locked = true; |
| 3279 | } |
| 3280 | |
| 3281 | blk_start_plug(&plug); |
| 3282 | ret = f2fs_write_cache_pages(mapping, wbc, io_type); |
| 3283 | blk_finish_plug(&plug); |
| 3284 | |
| 3285 | if (locked) |
| 3286 | mutex_unlock(&sbi->writepages); |
| 3287 | |
| 3288 | if (wbc->sync_mode == WB_SYNC_ALL) |
| 3289 | atomic_dec(&sbi->wb_sync_req[DATA]); |
| 3290 | /* |
	 * if some pages were truncated, we cannot rely on mapping->host
	 * to detect pending bios.
| 3293 | */ |
| 3294 | |
| 3295 | f2fs_remove_dirty_inode(inode); |
| 3296 | return ret; |
| 3297 | |
| 3298 | skip_write: |
| 3299 | wbc->pages_skipped += get_dirty_pages(inode); |
| 3300 | trace_f2fs_writepages(mapping->host, wbc, DATA); |
| 3301 | return 0; |
| 3302 | } |
| 3303 | |
| 3304 | static int f2fs_write_data_pages(struct address_space *mapping, |
| 3305 | struct writeback_control *wbc) |
| 3306 | { |
| 3307 | struct inode *inode = mapping->host; |
| 3308 | |
| 3309 | return __f2fs_write_data_pages(mapping, wbc, |
| 3310 | F2FS_I(inode)->cp_task == current ? |
| 3311 | FS_CP_DATA_IO : FS_DATA_IO); |
| 3312 | } |
| 3313 | |
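/*
 * A failed write may have allocated blocks beyond i_size: drop the
 * page cache past i_size and truncate those blocks (quota inodes are
 * left alone).
 */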
| 3314 | void f2fs_write_failed(struct inode *inode, loff_t to) |
| 3315 | { |
| 3316 | loff_t i_size = i_size_read(inode); |
| 3317 | |
| 3318 | if (IS_NOQUOTA(inode)) |
| 3319 | return; |
| 3320 | |
| 3321 | /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */ |
| 3322 | if (to > i_size && !f2fs_verity_in_progress(inode)) { |
| 3323 | f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
| 3324 | filemap_invalidate_lock(inode->i_mapping); |
| 3325 | |
| 3326 | truncate_pagecache(inode, i_size); |
| 3327 | f2fs_truncate_blocks(inode, i_size, true); |
| 3328 | |
| 3329 | filemap_invalidate_unlock(inode->i_mapping); |
| 3330 | f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
| 3331 | } |
| 3332 | } |
| 3333 | |
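| | /*
| |  * Find, or reserve while holding the mapping lock, the block backing the
| |  * folio being written. Inline data is served directly when it still fits,
| |  * or converted to a regular block first. On success, *blk_addr returns the
| |  * block address and *node_changed reports whether a node page was dirtied,
| |  * so that the caller can rebalance free sections.
| |  */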
| 3334 | static int prepare_write_begin(struct f2fs_sb_info *sbi, |
| 3335 | struct folio *folio, loff_t pos, unsigned int len, |
| 3336 | block_t *blk_addr, bool *node_changed) |
| 3337 | { |
| 3338 | struct inode *inode = folio->mapping->host; |
| 3339 | pgoff_t index = folio->index; |
| 3340 | struct dnode_of_data dn; |
| 3341 | struct folio *ifolio; |
| 3342 | bool locked = false; |
| 3343 | int flag = F2FS_GET_BLOCK_PRE_AIO; |
| 3344 | int err = 0; |
| 3345 | |
| 3346 | /* |
| 3347 | * If a whole page is being written and we already preallocated all the |
| 3348 | * blocks, then there is no need to get a block address now. |
| 3349 | */ |
| 3350 | if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL)) |
| 3351 | return 0; |
| 3352 | |
| 3353 | 	/* f2fs_map_lock avoids the race between checkpoint and inline conversion */
| 3354 | if (f2fs_has_inline_data(inode)) { |
| 3355 | if (pos + len > MAX_INLINE_DATA(inode)) |
| 3356 | flag = F2FS_GET_BLOCK_DEFAULT; |
| 3357 | f2fs_map_lock(sbi, flag); |
| 3358 | locked = true; |
| 3359 | } else if ((pos & PAGE_MASK) >= i_size_read(inode)) { |
| 3360 | f2fs_map_lock(sbi, flag); |
| 3361 | locked = true; |
| 3362 | } |
| 3363 | |
| 3364 | restart: |
| 3365 | /* check inline_data */ |
| 3366 | ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); |
| 3367 | if (IS_ERR(ifolio)) { |
| 3368 | err = PTR_ERR(ifolio); |
| 3369 | goto unlock_out; |
| 3370 | } |
| 3371 | |
| 3372 | set_new_dnode(&dn, inode, ifolio, ifolio, 0); |
| 3373 | |
| 3374 | if (f2fs_has_inline_data(inode)) { |
| 3375 | if (pos + len <= MAX_INLINE_DATA(inode)) { |
| 3376 | f2fs_do_read_inline_data(folio, ifolio); |
| 3377 | set_inode_flag(inode, FI_DATA_EXIST); |
| 3378 | if (inode->i_nlink) |
| 3379 | set_page_private_inline(&ifolio->page); |
| 3380 | goto out; |
| 3381 | } |
| 3382 | err = f2fs_convert_inline_folio(&dn, folio); |
| 3383 | if (err || dn.data_blkaddr != NULL_ADDR) |
| 3384 | goto out; |
| 3385 | } |
| 3386 | |
| 3387 | if (!f2fs_lookup_read_extent_cache_block(inode, index, |
| 3388 | &dn.data_blkaddr)) { |
| 3389 | if (IS_DEVICE_ALIASING(inode)) { |
| 3390 | err = -ENODATA; |
| 3391 | goto out; |
| 3392 | } |
| 3393 | |
| 3394 | if (locked) { |
| 3395 | err = f2fs_reserve_block(&dn, index); |
| 3396 | goto out; |
| 3397 | } |
| 3398 | |
| 3399 | /* hole case */ |
| 3400 | err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); |
| 3401 | if (!err && dn.data_blkaddr != NULL_ADDR) |
| 3402 | goto out; |
| 3403 | f2fs_put_dnode(&dn); |
| 3404 | f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO); |
| 3405 | WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO); |
| 3406 | locked = true; |
| 3407 | goto restart; |
| 3408 | } |
| 3409 | out: |
| 3410 | if (!err) { |
| 3411 | 		/* f2fs_convert_inline_folio can set node_changed */
| 3412 | *blk_addr = dn.data_blkaddr; |
| 3413 | *node_changed = dn.node_changed; |
| 3414 | } |
| 3415 | f2fs_put_dnode(&dn); |
| 3416 | unlock_out: |
| 3417 | if (locked) |
| 3418 | f2fs_map_unlock(sbi, flag); |
| 3419 | return err; |
| 3420 | } |
| 3421 | |
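| | /*
| |  * Look up the block address backing @index without reserving anything.
| |  * A failed node lookup is reported as a hole (NULL_ADDR), not an error.
| |  */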
| 3422 | static int __find_data_block(struct inode *inode, pgoff_t index, |
| 3423 | block_t *blk_addr) |
| 3424 | { |
| 3425 | struct dnode_of_data dn; |
| 3426 | struct folio *ifolio; |
| 3427 | int err = 0; |
| 3428 | |
| 3429 | ifolio = f2fs_get_inode_folio(F2FS_I_SB(inode), inode->i_ino); |
| 3430 | if (IS_ERR(ifolio)) |
| 3431 | return PTR_ERR(ifolio); |
| 3432 | |
| 3433 | set_new_dnode(&dn, inode, ifolio, ifolio, 0); |
| 3434 | |
| 3435 | if (!f2fs_lookup_read_extent_cache_block(inode, index, |
| 3436 | &dn.data_blkaddr)) { |
| 3437 | /* hole case */ |
| 3438 | err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); |
| 3439 | if (err) { |
| 3440 | dn.data_blkaddr = NULL_ADDR; |
| 3441 | err = 0; |
| 3442 | } |
| 3443 | } |
| 3444 | *blk_addr = dn.data_blkaddr; |
| 3445 | f2fs_put_dnode(&dn); |
| 3446 | return err; |
| 3447 | } |
| 3448 | |
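| | /*
| |  * Reserve a new block at @index under f2fs_map_lock, unless the extent
| |  * cache already knows the block address.
| |  */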
| 3449 | static int __reserve_data_block(struct inode *inode, pgoff_t index, |
| 3450 | block_t *blk_addr, bool *node_changed) |
| 3451 | { |
| 3452 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 3453 | struct dnode_of_data dn; |
| 3454 | struct folio *ifolio; |
| 3455 | int err = 0; |
| 3456 | |
| 3457 | f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO); |
| 3458 | |
| 3459 | ifolio = f2fs_get_inode_folio(sbi, inode->i_ino); |
| 3460 | if (IS_ERR(ifolio)) { |
| 3461 | err = PTR_ERR(ifolio); |
| 3462 | goto unlock_out; |
| 3463 | } |
| 3464 | set_new_dnode(&dn, inode, ifolio, ifolio, 0); |
| 3465 | |
| 3466 | if (!f2fs_lookup_read_extent_cache_block(dn.inode, index, |
| 3467 | &dn.data_blkaddr)) |
| 3468 | err = f2fs_reserve_block(&dn, index); |
| 3469 | |
| 3470 | *blk_addr = dn.data_blkaddr; |
| 3471 | *node_changed = dn.node_changed; |
| 3472 | f2fs_put_dnode(&dn); |
| 3473 | |
| 3474 | unlock_out: |
| 3475 | f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO); |
| 3476 | return err; |
| 3477 | } |
| 3478 | |
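| | /*
| |  * For atomic writes, updates are staged in the COW inode. Reuse a block
| |  * already reserved there if one exists; otherwise reserve a new one and,
| |  * unless this is an FI_ATOMIC_REPLACE write, return the original inode's
| |  * block address so the old data can be read in before being overwritten.
| |  */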
| 3479 | static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi, |
| 3480 | struct folio *folio, loff_t pos, unsigned int len, |
| 3481 | block_t *blk_addr, bool *node_changed, bool *use_cow) |
| 3482 | { |
| 3483 | struct inode *inode = folio->mapping->host; |
| 3484 | struct inode *cow_inode = F2FS_I(inode)->cow_inode; |
| 3485 | pgoff_t index = folio->index; |
| 3486 | int err = 0; |
| 3487 | block_t ori_blk_addr = NULL_ADDR; |
| 3488 | |
| 3489 | /* If pos is beyond the end of file, reserve a new block in COW inode */ |
| 3490 | if ((pos & PAGE_MASK) >= i_size_read(inode)) |
| 3491 | goto reserve_block; |
| 3492 | |
| 3493 | /* Look for the block in COW inode first */ |
| 3494 | err = __find_data_block(cow_inode, index, blk_addr); |
| 3495 | if (err) { |
| 3496 | return err; |
| 3497 | } else if (*blk_addr != NULL_ADDR) { |
| 3498 | *use_cow = true; |
| 3499 | return 0; |
| 3500 | } |
| 3501 | |
| 3502 | if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE)) |
| 3503 | goto reserve_block; |
| 3504 | |
| 3505 | /* Look for the block in the original inode */ |
| 3506 | err = __find_data_block(inode, index, &ori_blk_addr); |
| 3507 | if (err) |
| 3508 | return err; |
| 3509 | |
| 3510 | reserve_block: |
| 3511 | /* Finally, we should reserve a new block in COW inode for the update */ |
| 3512 | err = __reserve_data_block(cow_inode, index, blk_addr, node_changed); |
| 3513 | if (err) |
| 3514 | return err; |
| 3515 | inc_atomic_write_cnt(inode); |
| 3516 | |
| 3517 | if (ori_blk_addr != NULL_ADDR) |
| 3518 | *blk_addr = ori_blk_addr; |
| 3519 | return 0; |
| 3520 | } |
| 3521 | |
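| | /*
| |  * .write_begin: pin and lock the folio covering [pos, pos + len), looking
| |  * up or reserving the block that backs it, and bring partially covered
| |  * folios uptodate by zeroing or by a synchronous read.
| |  */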
| 3522 | static int f2fs_write_begin(struct file *file, struct address_space *mapping, |
| 3523 | loff_t pos, unsigned len, struct folio **foliop, void **fsdata) |
| 3524 | { |
| 3525 | struct inode *inode = mapping->host; |
| 3526 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 3527 | struct folio *folio; |
| 3528 | pgoff_t index = pos >> PAGE_SHIFT; |
| 3529 | bool need_balance = false; |
| 3530 | bool use_cow = false; |
| 3531 | block_t blkaddr = NULL_ADDR; |
| 3532 | int err = 0; |
| 3533 | |
| 3534 | trace_f2fs_write_begin(inode, pos, len); |
| 3535 | |
| 3536 | if (!f2fs_is_checkpoint_ready(sbi)) { |
| 3537 | err = -ENOSPC; |
| 3538 | goto fail; |
| 3539 | } |
| 3540 | |
| 3541 | 	/*
| 3542 | 	 * Convert inline data now to avoid a deadlock between the inode page
| 3543 | 	 * and page #0. The locking order for inline_data conversion must be:
| 3544 | 	 * folio_lock(folio #0) -> folio_lock(inode_page)
| 3545 | 	 */
| 3546 | if (index != 0) { |
| 3547 | err = f2fs_convert_inline_inode(inode); |
| 3548 | if (err) |
| 3549 | goto fail; |
| 3550 | } |
| 3551 | |
| 3552 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3553 | if (f2fs_compressed_file(inode)) { |
| 3554 | int ret; |
| 3555 | struct page *page; |
| 3556 | |
| 3557 | *fsdata = NULL; |
| 3558 | |
| 3559 | 		if (len == PAGE_SIZE && !f2fs_is_atomic_file(inode))
| 3560 | goto repeat; |
| 3561 | |
| 3562 | ret = f2fs_prepare_compress_overwrite(inode, &page, |
| 3563 | index, fsdata); |
| 3564 | if (ret < 0) { |
| 3565 | err = ret; |
| 3566 | goto fail; |
| 3567 | } else if (ret) { |
| 3568 | *foliop = page_folio(page); |
| 3569 | return 0; |
| 3570 | } |
| 3571 | } |
| 3572 | #endif |
| 3573 | |
| 3574 | repeat: |
| 3575 | 	/*
| 3576 | 	 * Do not use FGP_STABLE here, to avoid a deadlock:
| 3577 | 	 * we wait for writeback below, under our own IO control.
| 3578 | 	 */
| 3579 | folio = __filemap_get_folio(mapping, index, |
| 3580 | FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS); |
| 3581 | if (IS_ERR(folio)) { |
| 3582 | err = PTR_ERR(folio); |
| 3583 | goto fail; |
| 3584 | } |
| 3585 | |
| 3586 | /* TODO: cluster can be compressed due to race with .writepage */ |
| 3587 | |
| 3588 | *foliop = folio; |
| 3589 | |
| 3590 | if (f2fs_is_atomic_file(inode)) |
| 3591 | err = prepare_atomic_write_begin(sbi, folio, pos, len, |
| 3592 | &blkaddr, &need_balance, &use_cow); |
| 3593 | else |
| 3594 | err = prepare_write_begin(sbi, folio, pos, len, |
| 3595 | &blkaddr, &need_balance); |
| 3596 | if (err) |
| 3597 | goto put_folio; |
| 3598 | |
| 3599 | if (need_balance && !IS_NOQUOTA(inode) && |
| 3600 | has_not_enough_free_secs(sbi, 0, 0)) { |
| 3601 | folio_unlock(folio); |
| 3602 | f2fs_balance_fs(sbi, true); |
| 3603 | folio_lock(folio); |
| 3604 | if (folio->mapping != mapping) { |
| 3605 | /* The folio got truncated from under us */ |
| 3606 | folio_unlock(folio); |
| 3607 | folio_put(folio); |
| 3608 | goto repeat; |
| 3609 | } |
| 3610 | } |
| 3611 | |
| 3612 | f2fs_folio_wait_writeback(folio, DATA, false, true); |
| 3613 | |
| 3614 | if (len == folio_size(folio) || folio_test_uptodate(folio)) |
| 3615 | return 0; |
| 3616 | |
| 3617 | if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) && |
| 3618 | !f2fs_verity_in_progress(inode)) { |
| 3619 | folio_zero_segment(folio, len, folio_size(folio)); |
| 3620 | return 0; |
| 3621 | } |
| 3622 | |
| 3623 | if (blkaddr == NEW_ADDR) { |
| 3624 | folio_zero_segment(folio, 0, folio_size(folio)); |
| 3625 | folio_mark_uptodate(folio); |
| 3626 | } else { |
| 3627 | if (!f2fs_is_valid_blkaddr(sbi, blkaddr, |
| 3628 | DATA_GENERIC_ENHANCE_READ)) { |
| 3629 | err = -EFSCORRUPTED; |
| 3630 | goto put_folio; |
| 3631 | } |
| 3632 | err = f2fs_submit_page_read(use_cow ? |
| 3633 | F2FS_I(inode)->cow_inode : inode, |
| 3634 | folio, blkaddr, 0, true); |
| 3635 | if (err) |
| 3636 | goto put_folio; |
| 3637 | |
| 3638 | folio_lock(folio); |
| 3639 | if (unlikely(folio->mapping != mapping)) { |
| 3640 | folio_unlock(folio); |
| 3641 | folio_put(folio); |
| 3642 | goto repeat; |
| 3643 | } |
| 3644 | if (unlikely(!folio_test_uptodate(folio))) { |
| 3645 | err = -EIO; |
| 3646 | goto put_folio; |
| 3647 | } |
| 3648 | } |
| 3649 | return 0; |
| 3650 | |
| 3651 | put_folio: |
| 3652 | folio_unlock(folio); |
| 3653 | folio_put(folio); |
| 3654 | fail: |
| 3655 | f2fs_write_failed(inode, pos + len); |
| 3656 | return err; |
| 3657 | } |
| 3658 | |
| 3659 | static int f2fs_write_end(struct file *file, |
| 3660 | struct address_space *mapping, |
| 3661 | loff_t pos, unsigned len, unsigned copied, |
| 3662 | struct folio *folio, void *fsdata) |
| 3663 | { |
| 3664 | struct inode *inode = folio->mapping->host; |
| 3665 | |
| 3666 | trace_f2fs_write_end(inode, pos, len, copied); |
| 3667 | |
| 3668 | 	/*
| 3669 | 	 * This should come from len == PAGE_SIZE, so we expect copied to be
| 3670 | 	 * PAGE_SIZE as well. Otherwise, treat it as if nothing was copied and
| 3671 | 	 * let generic_perform_write() retry the copy by passing back copied=0.
| 3672 | 	 */
| 3673 | if (!folio_test_uptodate(folio)) { |
| 3674 | if (unlikely(copied != len)) |
| 3675 | copied = 0; |
| 3676 | else |
| 3677 | folio_mark_uptodate(folio); |
| 3678 | } |
| 3679 | |
| 3680 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3681 | /* overwrite compressed file */ |
| 3682 | if (f2fs_compressed_file(inode) && fsdata) { |
| 3683 | f2fs_compress_write_end(inode, fsdata, folio->index, copied); |
| 3684 | f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); |
| 3685 | |
| 3686 | if (pos + copied > i_size_read(inode) && |
| 3687 | !f2fs_verity_in_progress(inode)) |
| 3688 | f2fs_i_size_write(inode, pos + copied); |
| 3689 | return copied; |
| 3690 | } |
| 3691 | #endif |
| 3692 | |
| 3693 | if (!copied) |
| 3694 | goto unlock_out; |
| 3695 | |
| 3696 | folio_mark_dirty(folio); |
| 3697 | |
| 3698 | if (f2fs_is_atomic_file(inode)) |
| 3699 | set_page_private_atomic(folio_page(folio, 0)); |
| 3700 | |
| 3701 | if (pos + copied > i_size_read(inode) && |
| 3702 | !f2fs_verity_in_progress(inode)) { |
| 3703 | f2fs_i_size_write(inode, pos + copied); |
| 3704 | if (f2fs_is_atomic_file(inode)) |
| 3705 | f2fs_i_size_write(F2FS_I(inode)->cow_inode, |
| 3706 | pos + copied); |
| 3707 | } |
| 3708 | unlock_out: |
| 3709 | folio_unlock(folio); |
| 3710 | folio_put(folio); |
| 3711 | f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); |
| 3712 | return copied; |
| 3713 | } |
| 3714 | |
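| | /*
| |  * .invalidate_folio: for data inodes, only a full-folio invalidation drops
| |  * the dirty accounting and private flags; meta and node folios are always
| |  * cleaned up here.
| |  */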
| 3715 | void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length) |
| 3716 | { |
| 3717 | struct inode *inode = folio->mapping->host; |
| 3718 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 3719 | |
| 3720 | if (inode->i_ino >= F2FS_ROOT_INO(sbi) && |
| 3721 | (offset || length != folio_size(folio))) |
| 3722 | return; |
| 3723 | |
| 3724 | if (folio_test_dirty(folio)) { |
| 3725 | if (inode->i_ino == F2FS_META_INO(sbi)) { |
| 3726 | dec_page_count(sbi, F2FS_DIRTY_META); |
| 3727 | } else if (inode->i_ino == F2FS_NODE_INO(sbi)) { |
| 3728 | dec_page_count(sbi, F2FS_DIRTY_NODES); |
| 3729 | } else { |
| 3730 | inode_dec_dirty_pages(inode); |
| 3731 | f2fs_remove_dirty_inode(inode); |
| 3732 | } |
| 3733 | } |
| 3734 | clear_page_private_all(&folio->page); |
| 3735 | } |
| 3736 | |
| 3737 | bool f2fs_release_folio(struct folio *folio, gfp_t wait) |
| 3738 | { |
| 3739 | 	/* if this folio is dirty, keep its private data */
| 3740 | if (folio_test_dirty(folio)) |
| 3741 | return false; |
| 3742 | |
| 3743 | clear_page_private_all(&folio->page); |
| 3744 | return true; |
| 3745 | } |
| 3746 | |
| 3747 | static bool f2fs_dirty_data_folio(struct address_space *mapping, |
| 3748 | struct folio *folio) |
| 3749 | { |
| 3750 | struct inode *inode = mapping->host; |
| 3751 | |
| 3752 | trace_f2fs_set_page_dirty(folio, DATA); |
| 3753 | |
| 3754 | if (!folio_test_uptodate(folio)) |
| 3755 | folio_mark_uptodate(folio); |
| 3756 | BUG_ON(folio_test_swapcache(folio)); |
| 3757 | |
| 3758 | if (filemap_dirty_folio(mapping, folio)) { |
| 3759 | f2fs_update_dirty_folio(inode, folio); |
| 3760 | return true; |
| 3761 | } |
| 3762 | return false; |
| 3763 | } |
| 3764 | 
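| | /*
| |  * bmap for a file with compression enabled: blocks in a raw (uncompressed)
| |  * cluster map to their physical address; compressed clusters have no
| |  * meaningful block number and report 0.
| |  */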
| 3766 | static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) |
| 3767 | { |
| 3768 | #ifdef CONFIG_F2FS_FS_COMPRESSION |
| 3769 | struct dnode_of_data dn; |
| 3770 | sector_t start_idx, blknr = 0; |
| 3771 | int ret; |
| 3772 | |
| 3773 | start_idx = round_down(block, F2FS_I(inode)->i_cluster_size); |
| 3774 | |
| 3775 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
| 3776 | ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); |
| 3777 | if (ret) |
| 3778 | return 0; |
| 3779 | |
| 3780 | if (dn.data_blkaddr != COMPRESS_ADDR) { |
| 3781 | dn.ofs_in_node += block - start_idx; |
| 3782 | blknr = f2fs_data_blkaddr(&dn); |
| 3783 | if (!__is_valid_data_blkaddr(blknr)) |
| 3784 | blknr = 0; |
| 3785 | } |
| 3786 | |
| 3787 | f2fs_put_dnode(&dn); |
| 3788 | return blknr; |
| 3789 | #else |
| 3790 | return 0; |
| 3791 | #endif |
| 3792 | } |
| 3793 | 
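| | /*
| |  * .bmap: write out dirty pages first so the result reflects on-disk
| |  * blocks; inline-data files have no block mapping to report.
| |  */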
| 3795 | static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) |
| 3796 | { |
| 3797 | struct inode *inode = mapping->host; |
| 3798 | sector_t blknr = 0; |
| 3799 | |
| 3800 | if (f2fs_has_inline_data(inode)) |
| 3801 | goto out; |
| 3802 | |
| 3803 | 	/* write out dirty pages to make sure whole blocks are allocated */
| 3804 | if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) |
| 3805 | filemap_write_and_wait(mapping); |
| 3806 | |
| 3807 | 	/* the block number must be below the F2FS maximum block count */
| 3808 | if (unlikely(block >= max_file_blocks(inode))) |
| 3809 | goto out; |
| 3810 | |
| 3811 | if (f2fs_compressed_file(inode)) { |
| 3812 | blknr = f2fs_bmap_compress(inode, block); |
| 3813 | } else { |
| 3814 | struct f2fs_map_blocks map; |
| 3815 | |
| 3816 | memset(&map, 0, sizeof(map)); |
| 3817 | map.m_lblk = block; |
| 3818 | map.m_len = 1; |
| 3819 | map.m_next_pgofs = NULL; |
| 3820 | map.m_seg_type = NO_CHECK_TYPE; |
| 3821 | |
| 3822 | if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP)) |
| 3823 | blknr = map.m_pblk; |
| 3824 | } |
| 3825 | out: |
| 3826 | trace_f2fs_bmap(inode, block, blknr); |
| 3827 | return blknr; |
| 3828 | } |
| 3829 | |
| 3830 | #ifdef CONFIG_SWAP |
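| | /*
| |  * Rewrite the data blocks in [start_blk, start_blk + blkcnt) into freshly
| |  * allocated pinned sections: one section at a time under pin_sem, each
| |  * folio is loaded and dirtied while writeback is suppressed
| |  * (FI_SKIP_WRITES), then written back out-of-place (FI_OPU_WRITE).
| |  */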
| 3831 | static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, |
| 3832 | unsigned int blkcnt) |
| 3833 | { |
| 3834 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 3835 | unsigned int blkofs; |
| 3836 | unsigned int blk_per_sec = BLKS_PER_SEC(sbi); |
| 3837 | unsigned int end_blk = start_blk + blkcnt - 1; |
| 3838 | unsigned int secidx = start_blk / blk_per_sec; |
| 3839 | unsigned int end_sec; |
| 3840 | int ret = 0; |
| 3841 | |
| 3842 | if (!blkcnt) |
| 3843 | return 0; |
| 3844 | end_sec = end_blk / blk_per_sec; |
| 3845 | |
| 3846 | f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
| 3847 | filemap_invalidate_lock(inode->i_mapping); |
| 3848 | |
| 3849 | set_inode_flag(inode, FI_ALIGNED_WRITE); |
| 3850 | set_inode_flag(inode, FI_OPU_WRITE); |
| 3851 | |
| 3852 | for (; secidx <= end_sec; secidx++) { |
| 3853 | unsigned int blkofs_end = secidx == end_sec ? |
| 3854 | end_blk % blk_per_sec : blk_per_sec - 1; |
| 3855 | |
| 3856 | f2fs_down_write(&sbi->pin_sem); |
| 3857 | |
| 3858 | ret = f2fs_allocate_pinning_section(sbi); |
| 3859 | if (ret) { |
| 3860 | f2fs_up_write(&sbi->pin_sem); |
| 3861 | break; |
| 3862 | } |
| 3863 | |
| 3864 | set_inode_flag(inode, FI_SKIP_WRITES); |
| 3865 | |
| 3866 | for (blkofs = 0; blkofs <= blkofs_end; blkofs++) { |
| 3867 | struct folio *folio; |
| 3868 | unsigned int blkidx = secidx * blk_per_sec + blkofs; |
| 3869 | |
| 3870 | folio = f2fs_get_lock_data_folio(inode, blkidx, true); |
| 3871 | if (IS_ERR(folio)) { |
| 3872 | f2fs_up_write(&sbi->pin_sem); |
| 3873 | ret = PTR_ERR(folio); |
| 3874 | goto done; |
| 3875 | } |
| 3876 | |
| 3877 | folio_mark_dirty(folio); |
| 3878 | f2fs_folio_put(folio, true); |
| 3879 | } |
| 3880 | |
| 3881 | clear_inode_flag(inode, FI_SKIP_WRITES); |
| 3882 | |
| 3883 | ret = filemap_fdatawrite(inode->i_mapping); |
| 3884 | |
| 3885 | f2fs_up_write(&sbi->pin_sem); |
| 3886 | |
| 3887 | if (ret) |
| 3888 | break; |
| 3889 | } |
| 3890 | |
| 3891 | done: |
| 3892 | clear_inode_flag(inode, FI_SKIP_WRITES); |
| 3893 | clear_inode_flag(inode, FI_OPU_WRITE); |
| 3894 | clear_inode_flag(inode, FI_ALIGNED_WRITE); |
| 3895 | |
| 3896 | filemap_invalidate_unlock(inode->i_mapping); |
| 3897 | f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
| 3898 | |
| 3899 | return ret; |
| 3900 | } |
| 3901 | |
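| | /*
| |  * Walk the swap file and register its extents with the swap layer.
| |  * Extents that are not section-aligned, or that sit in sequential zones,
| |  * are first rewritten in place via f2fs_migrate_blocks() and remapped.
| |  */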
| 3902 | static int check_swap_activate(struct swap_info_struct *sis, |
| 3903 | struct file *swap_file, sector_t *span) |
| 3904 | { |
| 3905 | struct address_space *mapping = swap_file->f_mapping; |
| 3906 | struct inode *inode = mapping->host; |
| 3907 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 3908 | block_t cur_lblock; |
| 3909 | block_t last_lblock; |
| 3910 | block_t pblock; |
| 3911 | block_t lowest_pblock = -1; |
| 3912 | block_t highest_pblock = 0; |
| 3913 | int nr_extents = 0; |
| 3914 | unsigned int nr_pblocks; |
| 3915 | unsigned int blks_per_sec = BLKS_PER_SEC(sbi); |
| 3916 | unsigned int not_aligned = 0; |
| 3917 | int ret = 0; |
| 3918 | |
| 3919 | /* |
| 3920 | * Map all the blocks into the extent list. This code doesn't try |
| 3921 | * to be very smart. |
| 3922 | */ |
| 3923 | cur_lblock = 0; |
| 3924 | last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode)); |
| 3925 | |
| 3926 | while (cur_lblock < last_lblock && cur_lblock < sis->max) { |
| 3927 | struct f2fs_map_blocks map; |
| 3928 | retry: |
| 3929 | cond_resched(); |
| 3930 | |
| 3931 | memset(&map, 0, sizeof(map)); |
| 3932 | map.m_lblk = cur_lblock; |
| 3933 | map.m_len = last_lblock - cur_lblock; |
| 3934 | map.m_next_pgofs = NULL; |
| 3935 | map.m_next_extent = NULL; |
| 3936 | map.m_seg_type = NO_CHECK_TYPE; |
| 3937 | map.m_may_create = false; |
| 3938 | |
| 3939 | ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP); |
| 3940 | if (ret) |
| 3941 | goto out; |
| 3942 | |
| 3943 | /* hole */ |
| 3944 | if (!(map.m_flags & F2FS_MAP_FLAGS)) { |
| 3945 | f2fs_err(sbi, "Swapfile has holes"); |
| 3946 | ret = -EINVAL; |
| 3947 | goto out; |
| 3948 | } |
| 3949 | |
| 3950 | pblock = map.m_pblk; |
| 3951 | nr_pblocks = map.m_len; |
| 3952 | |
| 3953 | if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec || |
| 3954 | nr_pblocks % blks_per_sec || |
| 3955 | f2fs_is_sequential_zone_area(sbi, pblock)) { |
| 3956 | bool last_extent = false; |
| 3957 | |
| 3958 | not_aligned++; |
| 3959 | |
| 3960 | nr_pblocks = roundup(nr_pblocks, blks_per_sec); |
| 3961 | if (cur_lblock + nr_pblocks > sis->max) |
| 3962 | nr_pblocks -= blks_per_sec; |
| 3963 | |
| 3964 | 			/* this is the last extent */
| 3965 | if (!nr_pblocks) { |
| 3966 | nr_pblocks = last_lblock - cur_lblock; |
| 3967 | last_extent = true; |
| 3968 | } |
| 3969 | |
| 3970 | ret = f2fs_migrate_blocks(inode, cur_lblock, |
| 3971 | nr_pblocks); |
| 3972 | if (ret) { |
| 3973 | if (ret == -ENOENT) |
| 3974 | ret = -EINVAL; |
| 3975 | goto out; |
| 3976 | } |
| 3977 | |
| 3978 | if (!last_extent) |
| 3979 | goto retry; |
| 3980 | } |
| 3981 | |
| 3982 | if (cur_lblock + nr_pblocks >= sis->max) |
| 3983 | nr_pblocks = sis->max - cur_lblock; |
| 3984 | |
| 3985 | if (cur_lblock) { /* exclude the header page */ |
| 3986 | if (pblock < lowest_pblock) |
| 3987 | lowest_pblock = pblock; |
| 3988 | if (pblock + nr_pblocks - 1 > highest_pblock) |
| 3989 | highest_pblock = pblock + nr_pblocks - 1; |
| 3990 | } |
| 3991 | |
| 3992 | 		/*
| 3993 | 		 * found a contiguous run of blocks; add it as a swap extent
| 3994 | 		 */
| 3995 | ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock); |
| 3996 | if (ret < 0) |
| 3997 | goto out; |
| 3998 | nr_extents += ret; |
| 3999 | cur_lblock += nr_pblocks; |
| 4000 | } |
| 4001 | ret = nr_extents; |
| 4002 | *span = 1 + highest_pblock - lowest_pblock; |
| 4003 | if (cur_lblock == 0) |
| 4004 | cur_lblock = 1; /* force Empty message */ |
| 4005 | sis->max = cur_lblock; |
| 4006 | sis->pages = cur_lblock - 1; |
| 4007 | out: |
| 4008 | if (not_aligned) |
| 4009 | 		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
| 4010 | 			not_aligned, blks_per_sec * F2FS_BLKSIZE);
| 4011 | return ret; |
| 4012 | } |
| 4013 | |
| 4014 | static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, |
| 4015 | sector_t *span) |
| 4016 | { |
| 4017 | struct inode *inode = file_inode(file); |
| 4018 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
| 4019 | int ret; |
| 4020 | |
| 4021 | if (!S_ISREG(inode->i_mode)) |
| 4022 | return -EINVAL; |
| 4023 | |
| 4024 | if (f2fs_readonly(sbi->sb)) |
| 4025 | return -EROFS; |
| 4026 | |
| 4027 | if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) { |
| 4028 | f2fs_err(sbi, "Swapfile not supported in LFS mode"); |
| 4029 | return -EINVAL; |
| 4030 | } |
| 4031 | |
| 4032 | ret = f2fs_convert_inline_inode(inode); |
| 4033 | if (ret) |
| 4034 | return ret; |
| 4035 | |
| 4036 | if (!f2fs_disable_compressed_file(inode)) |
| 4037 | return -EINVAL; |
| 4038 | |
| 4039 | ret = filemap_fdatawrite(inode->i_mapping); |
| 4040 | if (ret < 0) |
| 4041 | return ret; |
| 4042 | |
| 4043 | f2fs_precache_extents(inode); |
| 4044 | |
| 4045 | ret = check_swap_activate(sis, file, span); |
| 4046 | if (ret < 0) |
| 4047 | return ret; |
| 4048 | |
| 4049 | stat_inc_swapfile_inode(inode); |
| 4050 | set_inode_flag(inode, FI_PIN_FILE); |
| 4051 | f2fs_update_time(sbi, REQ_TIME); |
| 4052 | return ret; |
| 4053 | } |
| 4054 | |
| 4055 | static void f2fs_swap_deactivate(struct file *file) |
| 4056 | { |
| 4057 | struct inode *inode = file_inode(file); |
| 4058 | |
| 4059 | stat_dec_swapfile_inode(inode); |
| 4060 | clear_inode_flag(inode, FI_PIN_FILE); |
| 4061 | } |
| 4062 | #else |
| 4063 | static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file, |
| 4064 | sector_t *span) |
| 4065 | { |
| 4066 | return -EOPNOTSUPP; |
| 4067 | } |
| 4068 | |
| 4069 | static void f2fs_swap_deactivate(struct file *file) |
| 4070 | { |
| 4071 | } |
| 4072 | #endif |
| 4073 | |
| 4074 | const struct address_space_operations f2fs_dblock_aops = { |
| 4075 | .read_folio = f2fs_read_data_folio, |
| 4076 | .readahead = f2fs_readahead, |
| 4077 | .writepages = f2fs_write_data_pages, |
| 4078 | .write_begin = f2fs_write_begin, |
| 4079 | .write_end = f2fs_write_end, |
| 4080 | .dirty_folio = f2fs_dirty_data_folio, |
| 4081 | .migrate_folio = filemap_migrate_folio, |
| 4082 | .invalidate_folio = f2fs_invalidate_folio, |
| 4083 | .release_folio = f2fs_release_folio, |
| 4084 | .bmap = f2fs_bmap, |
| 4085 | .swap_activate = f2fs_swap_activate, |
| 4086 | .swap_deactivate = f2fs_swap_deactivate, |
| 4087 | }; |
| 4088 | |
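| | /* Clear the xarray dirty tag without touching the folio's dirty flag. */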
| 4089 | void f2fs_clear_page_cache_dirty_tag(struct folio *folio) |
| 4090 | { |
| 4091 | struct address_space *mapping = folio->mapping; |
| 4092 | unsigned long flags; |
| 4093 | |
| 4094 | xa_lock_irqsave(&mapping->i_pages, flags); |
| 4095 | __xa_clear_mark(&mapping->i_pages, folio->index, |
| 4096 | PAGECACHE_TAG_DIRTY); |
| 4097 | xa_unlock_irqrestore(&mapping->i_pages, flags); |
| 4098 | } |
| 4099 | |
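| | /*
| |  * The post-read context cache is backed by a mempool so that read
| |  * completion can always make forward progress under memory pressure.
| |  */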
| 4100 | int __init f2fs_init_post_read_processing(void) |
| 4101 | { |
| 4102 | bio_post_read_ctx_cache = |
| 4103 | kmem_cache_create("f2fs_bio_post_read_ctx", |
| 4104 | sizeof(struct bio_post_read_ctx), 0, 0, NULL); |
| 4105 | if (!bio_post_read_ctx_cache) |
| 4106 | goto fail; |
| 4107 | bio_post_read_ctx_pool = |
| 4108 | mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS, |
| 4109 | bio_post_read_ctx_cache); |
| 4110 | if (!bio_post_read_ctx_pool) |
| 4111 | goto fail_free_cache; |
| 4112 | return 0; |
| 4113 | |
| 4114 | fail_free_cache: |
| 4115 | kmem_cache_destroy(bio_post_read_ctx_cache); |
| 4116 | fail: |
| 4117 | return -ENOMEM; |
| 4118 | } |
| 4119 | |
| 4120 | void f2fs_destroy_post_read_processing(void) |
| 4121 | { |
| 4122 | mempool_destroy(bio_post_read_ctx_pool); |
| 4123 | kmem_cache_destroy(bio_post_read_ctx_cache); |
| 4124 | } |
| 4125 | |
| 4126 | int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi) |
| 4127 | { |
| 4128 | if (!f2fs_sb_has_encrypt(sbi) && |
| 4129 | !f2fs_sb_has_verity(sbi) && |
| 4130 | !f2fs_sb_has_compression(sbi)) |
| 4131 | return 0; |
| 4132 | |
| 4133 | sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq", |
| 4134 | WQ_UNBOUND | WQ_HIGHPRI, |
| 4135 | num_online_cpus()); |
| 4136 | return sbi->post_read_wq ? 0 : -ENOMEM; |
| 4137 | } |
| 4138 | |
| 4139 | void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi) |
| 4140 | { |
| 4141 | if (sbi->post_read_wq) |
| 4142 | destroy_workqueue(sbi->post_read_wq); |
| 4143 | } |
| 4144 | |
| 4145 | int __init f2fs_init_bio_entry_cache(void) |
| 4146 | { |
| 4147 | bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab", |
| 4148 | sizeof(struct bio_entry)); |
| 4149 | return bio_entry_slab ? 0 : -ENOMEM; |
| 4150 | } |
| 4151 | |
| 4152 | void f2fs_destroy_bio_entry_cache(void) |
| 4153 | { |
| 4154 | kmem_cache_destroy(bio_entry_slab); |
| 4155 | } |
| 4156 | |
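| | /*
| |  * Translate a byte range into an iomap, for the iomap-based direct IO
| |  * path. Holes, unwritten (NEW_ADDR) and mapped extents are reported; a
| |  * write that still lacks a mapped block returns -ENOTBLK to fall back
| |  * to buffered IO.
| |  */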
| 4157 | static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, |
| 4158 | unsigned int flags, struct iomap *iomap, |
| 4159 | struct iomap *srcmap) |
| 4160 | { |
| 4161 | struct f2fs_map_blocks map = {}; |
| 4162 | pgoff_t next_pgofs = 0; |
| 4163 | int err; |
| 4164 | |
| 4165 | map.m_lblk = F2FS_BYTES_TO_BLK(offset); |
| 4166 | map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1; |
| 4167 | map.m_next_pgofs = &next_pgofs; |
| 4168 | map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode), |
| 4169 | inode->i_write_hint); |
| 4170 | |
| 4171 | /* |
| 4172 | * If the blocks being overwritten are already allocated, |
| 4173 | * f2fs_map_lock and f2fs_balance_fs are not necessary. |
| 4174 | */ |
| 4175 | if ((flags & IOMAP_WRITE) && |
| 4176 | !f2fs_overwrite_io(inode, offset, length)) |
| 4177 | map.m_may_create = true; |
| 4178 | |
| 4179 | err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO); |
| 4180 | if (err) |
| 4181 | return err; |
| 4182 | |
| 4183 | iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk); |
| 4184 | |
| 4185 | /* |
| 4186 | * When inline encryption is enabled, sometimes I/O to an encrypted file |
| 4187 | * has to be broken up to guarantee DUN contiguity. Handle this by |
| 4188 | * limiting the length of the mapping returned. |
| 4189 | */ |
| 4190 | map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len); |
| 4191 | |
| 4192 | /* |
| 4193 | * We should never see delalloc or compressed extents here based on |
| 4194 | * prior flushing and checks. |
| 4195 | */ |
| 4196 | if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR)) |
| 4197 | return -EINVAL; |
| 4198 | |
| 4199 | if (map.m_flags & F2FS_MAP_MAPPED) { |
| 4200 | if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR)) |
| 4201 | return -EINVAL; |
| 4202 | |
| 4203 | iomap->length = F2FS_BLK_TO_BYTES(map.m_len); |
| 4204 | iomap->type = IOMAP_MAPPED; |
| 4205 | iomap->flags |= IOMAP_F_MERGED; |
| 4206 | iomap->bdev = map.m_bdev; |
| 4207 | iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk); |
| 4208 | } else { |
| 4209 | if (flags & IOMAP_WRITE) |
| 4210 | return -ENOTBLK; |
| 4211 | |
| 4212 | if (map.m_pblk == NULL_ADDR) { |
| 4213 | iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) - |
| 4214 | iomap->offset; |
| 4215 | iomap->type = IOMAP_HOLE; |
| 4216 | } else if (map.m_pblk == NEW_ADDR) { |
| 4217 | iomap->length = F2FS_BLK_TO_BYTES(map.m_len); |
| 4218 | iomap->type = IOMAP_UNWRITTEN; |
| 4219 | } else { |
| 4220 | f2fs_bug_on(F2FS_I_SB(inode), 1); |
| 4221 | } |
| 4222 | iomap->addr = IOMAP_NULL_ADDR; |
| 4223 | } |
| 4224 | |
| 4225 | if (map.m_flags & F2FS_MAP_NEW) |
| 4226 | iomap->flags |= IOMAP_F_NEW; |
| 4227 | if ((inode->i_state & I_DIRTY_DATASYNC) || |
| 4228 | offset + length > i_size_read(inode)) |
| 4229 | iomap->flags |= IOMAP_F_DIRTY; |
| 4230 | |
| 4231 | return 0; |
| 4232 | } |
| 4233 | |
| 4234 | const struct iomap_ops f2fs_iomap_ops = { |
| 4235 | .iomap_begin = f2fs_iomap_begin, |
| 4236 | }; |