// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS);
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode))
		return true;

	if (f2fs_is_compressed_page(page))
		return false;
	if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
			page_private_gcing(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};
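
/*
 * Example (derived from f2fs_grab_read_bio() below): a read bio for a file
 * that uses fs-layer encryption and fs-verity, but not compression, carries
 * enabled_steps == (STEP_DECRYPT | STEP_VERITY); the steps then run in that
 * order once the I/O completes.
 */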

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
	/*
	 * decompression_attempted keeps track of whether
	 * f2fs_end_read_compressed_page() has yet been called on the pages in
	 * the bio that belong to a compressed cluster.
	 */
	bool decompression_attempted;
	block_t fs_blkaddr;
};

/*
 * Update and unlock a bio's pages, and free the bio.
 *
 * This marks pages up-to-date only if there was no error in the bio (I/O error,
 * decryption error, or verity error), as indicated by bio->bi_status.
 *
 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
 * aren't marked up-to-date here, as decompression is done on a per-compression-
 * cluster basis rather than a per-bio basis. Instead, we must do only two
 * things for each compressed page here: call f2fs_end_read_compressed_page()
 * with failed=true if an error occurred before it would have normally gotten
 * called (i.e., I/O error or decryption error, but *not* verity error), and
 * release the bio's reference to the decompress_io_ctx of the page's cluster.
 */
static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	struct bio_post_read_ctx *ctx = bio->bi_private;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page)) {
			if (ctx && !ctx->decompression_attempted)
				f2fs_end_read_compressed_page(page, true, 0,
							in_task);
			f2fs_put_page_dic(page, in_task);
			continue;
		}

		if (bio->bi_status)
			ClearPageUptodate(page);
		else
			SetPageUptodate(page);
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (ctx)
		mempool_free(ctx, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readahead() again, and while verity
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !fsverity_verify_page(page)) {
				bio->bi_status = BLK_STS_IOERR;
				break;
			}
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio, true);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio, in_task);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
		bool in_task)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;
	block_t blkaddr = ctx->fs_blkaddr;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, false, blkaddr,
						      in_task);
		else
			all_compressed = false;

		blkaddr++;
	}

	ctx->decompression_attempted = true;

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
		f2fs_finish_read_bio(bio, true);
		return;
	}

	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx, true);

	f2fs_verify_and_finish_bio(bio, true);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx;
	bool intask = in_task();

	iostat_update_and_unbind_ctx(bio, 0);
	ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (bio->bi_status) {
		f2fs_finish_read_bio(bio, intask);
		return;
	}

	if (ctx) {
		unsigned int enabled_steps = ctx->enabled_steps &
					(STEP_DECRYPT | STEP_DECOMPRESS);

		/*
		 * If decompression is the only enabled post-read step (i.e.,
		 * no decryption is needed), handle it in this context rather
		 * than punting to the post-read workqueue, unless we are in
		 * low-memory mode.
		 */
		if (enabled_steps == STEP_DECOMPRESS &&
				!f2fs_low_mem_mode(sbi)) {
			f2fs_handle_step_decompress(ctx, intask);
		} else if (enabled_steps) {
			INIT_WORK(&ctx->work, f2fs_post_read_work);
			queue_work(ctx->sbi->post_read_wq, &ctx->work);
			return;
		}
	}

	f2fs_verify_and_finish_bio(bio, intask);
}

static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	iostat_update_and_unbind_ctx(bio, 1);
	sbi = bio->bi_private;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (page_private_dummy(page)) {
			clear_page_private_dummy(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true,
						STOP_CP_REASON_WRITE_FAIL);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_page_private_gcing(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
		block_t blk_addr, sector_t *sector)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}

	if (sector)
		*sector = SECTOR_FROM_BLOCK(blk_addr);
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
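
/*
 * Worked example with a hypothetical two-device layout: if FDEV(0) covers
 * blocks 0x0..0xffff and FDEV(1) covers blocks 0x10000..0x1ffff, then for
 * blk_addr 0x10004 f2fs_target_device() returns FDEV(1).bdev with the
 * device-relative block 0x4, and f2fs_target_device_index() returns 1.
 */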

static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
{
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int fua_flag, meta_flag, io_flag;
	blk_opf_t op_flags = 0;

	if (fio->op != REQ_OP_WRITE)
		return 0;
	if (fio->type == DATA)
		io_flag = fio->sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = fio->sbi->node_io_flag;
	else
		return 0;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |  4 |   3   |    2 |  1 |   0   |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		op_flags |= REQ_FUA;
	return op_flags;
}
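
/*
 * Worked example (illustrative value): with NR_TEMP_TYPE == 3, io_flag ==
 * 0b100001 yields fua_flag == 0b001 and meta_flag == 0b100, so HOT writes
 * get REQ_FUA, COLD writes get REQ_META, and WARM writes get neither.
 */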

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct block_device *bdev;
	sector_t sector;
	struct bio *bio;

	bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
	bio = bio_alloc_bioset(bdev, npages,
				fio->op | fio->op_flags | f2fs_io_flags(fio),
				GFP_NOIO, &f2fs_bioset);
	bio->bi_iter.bi_sector = sector;
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, NULL);

	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			lock_page(page);

			zero_user_segment(page, 0, PAGE_SIZE);
			set_page_private_dummy(page);

			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain, so
		 * we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);

	iostat_update_submit_ctx(bio, type);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;
		int j;

		sbi->write_io[i] = f2fs_kmalloc(sbi,
				array_size(n, sizeof(struct f2fs_bio_info)),
				GFP_KERNEL);
		if (!sbi->write_io[i])
			return -ENOMEM;

		for (j = HOT; j < n; j++) {
			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
		}
	}

	return 0;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	f2fs_down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			f2fs_down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			f2fs_up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE))) {
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	trace_f2fs_submit_page_bio(page, fio);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
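
/*
 * Illustrative sketch of a minimal caller; the fields shown are the ones
 * consumed above, while real callers (e.g. the meta-page readers in
 * checkpoint.c) typically set more, such as .temp and .old_blkaddr:
 *
 *	struct f2fs_io_info fio = {
 *		.sbi = sbi,
 *		.type = META,
 *		.op = REQ_OP_READ,
 *		.op_flags = REQ_SYNC,
 *		.new_blkaddr = blkaddr,
 *		.page = page,
 *		.encrypted_page = NULL,
 *	};
 *
 *	err = f2fs_submit_page_bio(&fio);
 */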

static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (unlikely(sbi->max_io_bytes &&
			bio->bi_iter.bi_size >= sbi->max_io_bytes))
		return false;
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/*
		 * The IOs in the bio are aligned, but the remaining bio
		 * vector space is not enough for one more aligned unit.
		 */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	f2fs_down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	f2fs_up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		f2fs_down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		f2fs_up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		f2fs_down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		f2fs_up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC)) {
		f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	trace_f2fs_submit_page_bio(page, fio);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	f2fs_down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_VECS);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	f2fs_up_write(&io->io_rwsem);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, blk_opf_t op_flag,
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx = NULL;
	unsigned int post_read_steps = 0;
	sector_t sector;
	struct block_device *bdev = f2fs_target_device(sbi, blkaddr, &sector);

	bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
			       REQ_OP_READ | op_flag,
			       for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio->bi_iter.bi_sector = sector;
	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
	bio->bi_end_io = f2fs_read_end_io;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= STEP_DECRYPT;

	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		ctx->fs_blkaddr = blkaddr;
		ctx->decompression_attempted = false;
		bio->bi_private = ctx;
	}
	iostat_alloc_and_bind_ctx(sbi, bio, ctx);

	return bio;
}

/* This can handle encryption stuff */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, blk_opf_t op_flags,
				 bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_read_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, };
	struct inode *inode = dn->inode;

	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
				     blk_opf_t op_flags, bool for_write,
				     pgoff_t *next_pgofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, };
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			f2fs_handle_error(F2FS_I_SB(inode),
						ERROR_INVALID_BLKADDR);
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT && next_pgofs)
			*next_pgofs = f2fs_get_next_page_offset(&dn, index);
		goto put_err;
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		if (next_pgofs)
			*next_pgofs = index + 1;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(F2FS_I_SB(inode),
					ERROR_INVALID_BLKADDR);
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
					pgoff_t *next_pgofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
				&sum, seg_type, NULL);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
		f2fs_invalidate_compress_page(sbi, old_blkaddr);
	}
	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
	return 0;
}

void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			f2fs_down_read(&sbi->node_change);
		else
			f2fs_up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() tries to find or build a mapping relationship which
 * maps continuous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, };
	block_t blkaddr;
	unsigned int start_pgofs;
	int bidx = 0;

	if (!maxblocks)
		return 0;

	map->m_bdev = inode->i_sb->s_bdev;
	map->m_multidev_dio =
		f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) {
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;

		/* for hardware encryption, and to avoid a potential issue in the future */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

		if (map->m_multidev_dio) {
			block_t blk_addr = map->m_pblk;

			bidx = f2fs_target_device_index(sbi, map->m_pblk);

			map->m_bdev = FDEV(bidx).bdev;
			map->m_pblk -= FDEV(bidx).start_blk;
			map->m_len = min(map->m_len,
				FDEV(bidx).end_blk + 1 - map->m_pblk);

			if (map->m_may_create)
				f2fs_update_device_state(sbi, inode->i_ino,
							blk_addr, map->m_len);
		}
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		f2fs_do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;

		if (err == -ENOENT) {
			/*
			 * There is one exceptional case that read_node_page()
			 * may return -ENOENT because the filesystem has been
			 * shut down or hit cp_error, so force the error
			 * number to EIO in such a case.
			 */
			if (map->m_may_create &&
				(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_cp_error(sbi))) {
				err = -EIO;
				goto unlock_out;
			}

			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = f2fs_data_blkaddr(&dn);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (err)
				goto sync_out;
			blkaddr = dn.data_blkaddr;
			set_inode_flag(inode, FI_APPEND_WRITE);
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err) {
					if (flag == F2FS_GET_BLOCK_PRE_DIO)
						file_need_truncate(inode);
					set_inode_flag(inode, FI_APPEND_WRITE);
				}
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (f2fs_compressed_file(inode) &&
					f2fs_sanity_check_cluster(&dn) &&
				(flag != F2FS_GET_BLOCK_FIEMAP ||
					IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
				err = -EFSCORRUPTED;
				f2fs_handle_error(sbi,
						ERROR_CORRUPTED_CLUSTER);
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_multidev_dio)
		bidx = f2fs_target_device_index(sbi, blkaddr);

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;

		if (map->m_multidev_dio)
			map->m_bdev = FDEV(bidx).bdev;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
			goto sync_out;
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_read_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
		/*
		 * for hardware encryption, and to avoid a potential issue
		 * in the future
		 */
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

		if (map->m_multidev_dio) {
			block_t blk_addr = map->m_pblk;

			bidx = f2fs_target_device_index(sbi, map->m_pblk);

			map->m_bdev = FDEV(bidx).bdev;
			map->m_pblk -= FDEV(bidx).start_blk;

			if (map->m_may_create)
				f2fs_update_device_state(sbi, inode->i_ino,
							blk_addr, map->m_len);

			f2fs_bug_on(sbi, blk_addr + map->m_len >
						FDEV(bidx).end_blk + 1);
		}
	}

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_read_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, create, flag, err);
	return err;
}
1777
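/*
 * Check whether the range [pos, pos + len) lies entirely inside i_size
 * and is fully backed by already-allocated blocks, i.e. a write to it
 * would be a pure overwrite. The range is walked with read-only
 * f2fs_map_blocks() calls (m_may_create == false); the first unmapped
 * gap makes the check fail.
 */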
b91050a8
HL
1778bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1779{
1780 struct f2fs_map_blocks map;
1781 block_t last_lblk;
1782 int err;
1783
1784 if (pos + len > i_size_read(inode))
1785 return false;
1786
1787 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1788 map.m_next_pgofs = NULL;
1789 map.m_next_extent = NULL;
1790 map.m_seg_type = NO_CHECK_TYPE;
f4f0b677 1791 map.m_may_create = false;
b91050a8
HL
1792 last_lblk = F2FS_BLK_ALIGN(pos + len);
1793
1794 while (map.m_lblk < last_lblk) {
1795 map.m_len = last_lblk - map.m_lblk;
1796 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1797 if (err || map.m_len == 0)
1798 return false;
1799 map.m_lblk += map.m_len;
1800 }
1801 return true;
1802}
1803
43b9d4b4
JK
1804static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1805{
1806 return (bytes >> inode->i_blkbits);
1807}
1808
1809static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1810{
1811 return (blks << inode->i_blkbits);
1812}
1813
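/*
 * Report the extended attribute area for FIEMAP_FLAG_XATTR requests:
 * the inline xattr region at the tail of the inode block (flagged
 * DATA_INLINE | NOT_ALIGNED) and/or the dedicated xattr node block,
 * depending on which of the two this inode actually uses.
 */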
442a9dbd
CY
1814static int f2fs_xattr_fiemap(struct inode *inode,
1815 struct fiemap_extent_info *fieinfo)
1816{
1817 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1818 struct page *page;
1819 struct node_info ni;
1820 __u64 phys = 0, len;
1821 __u32 flags;
1822 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1823 int err = 0;
1824
1825 if (f2fs_has_inline_xattr(inode)) {
1826 int offset;
1827
1828 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1829 inode->i_ino, false);
1830 if (!page)
1831 return -ENOMEM;
1832
a9419b63 1833 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
7735730d
CY
1834 if (err) {
1835 f2fs_put_page(page, 1);
1836 return err;
1837 }
442a9dbd 1838
6cbfcab5 1839 phys = blks_to_bytes(inode, ni.blk_addr);
442a9dbd
CY
1840 offset = offsetof(struct f2fs_inode, i_addr) +
1841 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
b323fd28 1842 get_inline_xattr_addrs(inode));
442a9dbd
CY
1843
1844 phys += offset;
1845 len = inline_xattr_size(inode);
1846
1847 f2fs_put_page(page, 1);
1848
1849 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1850
1851 if (!xnid)
1852 flags |= FIEMAP_EXTENT_LAST;
1853
1854 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
dd5a09bd 1855 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
ca7efd71 1856 if (err)
442a9dbd
CY
1857 return err;
1858 }
1859
1860 if (xnid) {
1861 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1862 if (!page)
1863 return -ENOMEM;
1864
a9419b63 1865 err = f2fs_get_node_info(sbi, xnid, &ni, false);
7735730d
CY
1866 if (err) {
1867 f2fs_put_page(page, 1);
1868 return err;
1869 }
442a9dbd 1870
6cbfcab5 1871 phys = blks_to_bytes(inode, ni.blk_addr);
442a9dbd
CY
1872 len = inode->i_sb->s_blocksize;
1873
1874 f2fs_put_page(page, 1);
1875
1876 flags = FIEMAP_EXTENT_LAST;
1877 }
1878
dd5a09bd 1879 if (phys) {
442a9dbd 1880 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
dd5a09bd
CY
1881 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1882 }
442a9dbd
CY
1883
1884 return (err < 0 ? err : 0);
1885}
1886
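/*
 * Upper bound on data blocks addressable from one inode: the in-inode
 * pointers plus two direct node blocks, two indirect trees and one
 * double-indirect tree. As a rough worked example, assuming the usual
 * 4KB layout (923 pointers in the inode and 1018 per node block --
 * defaults defined elsewhere, not in this file):
 *
 *   923 + 2*1018 + 2*1018^2 + 1018^3 ~= 1.06e9 blocks ~= 3.94TB
 */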
bf38fbad
CY
1887static loff_t max_inode_blocks(struct inode *inode)
1888{
1889 loff_t result = ADDRS_PER_INODE(inode);
1890 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1891
1892 /* two direct node blocks */
1893 result += (leaf_count * 2);
1894
1895 /* two indirect node blocks */
1896 leaf_count *= NIDS_PER_BLOCK;
1897 result += (leaf_count * 2);
1898
1899 /* one double indirect node block */
1900 leaf_count *= NIDS_PER_BLOCK;
1901 result += leaf_count;
1902
1903 return result;
1904}
1905
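/*
 * ->fiemap for f2fs. Precaches extents on FIEMAP_FLAG_CACHE, hands
 * xattr requests to f2fs_xattr_fiemap() and inline data to
 * f2fs_inline_data_fiemap(), then walks the file with
 * F2FS_GET_BLOCK_FIEMAP mappings. A compressed cluster is merged into
 * a single FIEMAP_EXTENT_ENCODED extent.
 */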
9ab70134
JK
1906int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1907 u64 start, u64 len)
1908{
b876f4c9 1909 struct f2fs_map_blocks map;
7f63eb77 1910 sector_t start_blk, last_blk;
da85985c 1911 pgoff_t next_pgofs;
7f63eb77
JK
1912 u64 logical = 0, phys = 0, size = 0;
1913 u32 flags = 0;
7f63eb77 1914 int ret = 0;
093f0bac 1915 bool compr_cluster = false, compr_appended;
bf38fbad 1916 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
093f0bac 1917 unsigned int count_in_cluster = 0;
0bb2045c 1918 loff_t maxbytes;
7f63eb77 1919
c4020b2d
CY
1920 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1921 ret = f2fs_precache_extents(inode);
1922 if (ret)
1923 return ret;
1924 }
1925
45dd052e 1926 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
7f63eb77
JK
1927 if (ret)
1928 return ret;
1929
f1b43d4c
CY
1930 inode_lock(inode);
1931
0bb2045c
CX
1932 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1933 if (start > maxbytes) {
1934 ret = -EFBIG;
1935 goto out;
1936 }
1937
1938 if (len > maxbytes || (maxbytes - len) < start)
1939 len = maxbytes - start;
1940
442a9dbd
CY
1941 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1942 ret = f2fs_xattr_fiemap(inode, fieinfo);
1943 goto out;
1944 }
1945
7975f349 1946 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
67f8cf3c
JK
1947 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1948 if (ret != -EAGAIN)
f1b43d4c 1949 goto out;
67f8cf3c
JK
1950 }
1951
6cbfcab5
JK
1952 if (bytes_to_blks(inode, len) == 0)
1953 len = blks_to_bytes(inode, 1);
7f63eb77 1954
6cbfcab5
JK
1955 start_blk = bytes_to_blks(inode, start);
1956 last_blk = bytes_to_blks(inode, start + len - 1);
9a950d52 1957
7f63eb77 1958next:
b876f4c9
JK
1959 memset(&map, 0, sizeof(map));
1960 map.m_lblk = start_blk;
1961 map.m_len = bytes_to_blks(inode, len);
1962 map.m_next_pgofs = &next_pgofs;
1963 map.m_seg_type = NO_CHECK_TYPE;
7f63eb77 1964
093f0bac
DJ
1965 if (compr_cluster) {
1966 map.m_lblk += 1;
1967 map.m_len = cluster_size - count_in_cluster;
1968 }
bf38fbad 1969
b876f4c9 1970 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
7f63eb77
JK
1971 if (ret)
1972 goto out;
1973
1974 /* HOLE */
093f0bac 1975 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
da85985c 1976 start_blk = next_pgofs;
58736fa6 1977
6cbfcab5 1978 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
bf38fbad 1979 max_inode_blocks(inode)))
9a950d52 1980 goto prep_next;
58736fa6 1981
9a950d52
FL
1982 flags |= FIEMAP_EXTENT_LAST;
1983 }
7f63eb77 1984
093f0bac
DJ
1985 compr_appended = false;
1986 /* In the case of a compressed cluster, append this to the last extent */
1987 if (compr_cluster && ((map.m_flags & F2FS_MAP_UNWRITTEN) ||
1988 !(map.m_flags & F2FS_MAP_FLAGS))) {
1989 compr_appended = true;
1990 goto skip_fill;
1991 }
1992
da5af127 1993 if (size) {
0953fe86 1994 flags |= FIEMAP_EXTENT_MERGED;
62230e0d 1995 if (IS_ENCRYPTED(inode))
da5af127
CY
1996 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1997
9a950d52
FL
1998 ret = fiemap_fill_next_extent(fieinfo, logical,
1999 phys, size, flags);
dd5a09bd 2000 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
bf38fbad
CY
2001 if (ret)
2002 goto out;
2003 size = 0;
da5af127 2004 }
7f63eb77 2005
bf38fbad 2006 if (start_blk > last_blk)
9a950d52 2007 goto out;
7f63eb77 2008
093f0bac 2009skip_fill:
b876f4c9 2010 if (map.m_pblk == COMPRESS_ADDR) {
bf38fbad 2011 compr_cluster = true;
093f0bac
DJ
2012 count_in_cluster = 1;
2013 } else if (compr_appended) {
2014 unsigned int appended_blks = cluster_size -
2015 count_in_cluster + 1;
2016 size += blks_to_bytes(inode, appended_blks);
2017 start_blk += appended_blks;
2018 compr_cluster = false;
2019 } else {
2020 logical = blks_to_bytes(inode, start_blk);
2021 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2022 blks_to_bytes(inode, map.m_pblk) : 0;
2023 size = blks_to_bytes(inode, map.m_len);
2024 flags = 0;
2025
2026 if (compr_cluster) {
2027 flags = FIEMAP_EXTENT_ENCODED;
2028 count_in_cluster += map.m_len;
2029 if (count_in_cluster == cluster_size) {
2030 compr_cluster = false;
2031 size += blks_to_bytes(inode, 1);
2032 }
2033 } else if (map.m_flags & F2FS_MAP_UNWRITTEN) {
2034 flags = FIEMAP_EXTENT_UNWRITTEN;
2035 }
7f63eb77 2036
093f0bac
DJ
2037 start_blk += bytes_to_blks(inode, size);
2038 }
7f63eb77 2039
9a950d52 2040prep_next:
7f63eb77
JK
2041 cond_resched();
2042 if (fatal_signal_pending(current))
2043 ret = -EINTR;
2044 else
2045 goto next;
2046out:
2047 if (ret == 1)
2048 ret = 0;
2049
5955102c 2050 inode_unlock(inode);
7f63eb77 2051 return ret;
9ab70134
JK
2052}
2053
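/*
 * Reads on fs-verity files may legitimately go past i_size, since the
 * Merkle tree lives beyond EOF; allow up to s_maxbytes there. All
 * other reads are limited to i_size.
 */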
95ae251f
EB
2054static inline loff_t f2fs_readpage_limit(struct inode *inode)
2055{
2056 if (IS_ENABLED(CONFIG_FS_VERITY) &&
2057 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2058 return inode->i_sb->s_maxbytes;
2059
2060 return i_size_read(inode);
2061}
2062
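/*
 * Read one page for f2fs_mpage_readpages(): reuse the previous mapping
 * when the page falls inside it, otherwise call f2fs_map_blocks()
 * again. Holes and pages beyond EOF are zeroed (and verity-checked if
 * needed); mapped blocks are appended to *bio_ret, submitting the bio
 * first whenever the new block cannot be merged into it.
 */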
2df0ab04
CY
2063static int f2fs_read_single_page(struct inode *inode, struct page *page,
2064 unsigned nr_pages,
2065 struct f2fs_map_blocks *map,
2066 struct bio **bio_ret,
2067 sector_t *last_block_in_bio,
2068 bool is_readahead)
2069{
2070 struct bio *bio = *bio_ret;
43b9d4b4 2071 const unsigned blocksize = blks_to_bytes(inode, 1);
2df0ab04
CY
2072 sector_t block_in_file;
2073 sector_t last_block;
2074 sector_t last_block_in_file;
2075 sector_t block_nr;
2076 int ret = 0;
2077
4969c06a 2078 block_in_file = (sector_t)page_index(page);
2df0ab04 2079 last_block = block_in_file + nr_pages;
43b9d4b4
JK
2080 last_block_in_file = bytes_to_blks(inode,
2081 f2fs_readpage_limit(inode) + blocksize - 1);
2df0ab04
CY
2082 if (last_block > last_block_in_file)
2083 last_block = last_block_in_file;
2084
2085 /* just zero out the page which is beyond EOF */
2086 if (block_in_file >= last_block)
2087 goto zero_out;
2088 /*
2089 * Map blocks using the previous result first.
2090 */
2091 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2092 block_in_file > map->m_lblk &&
2093 block_in_file < (map->m_lblk + map->m_len))
2094 goto got_it;
2095
2096 /*
2097 * Then do more f2fs_map_blocks() calls until we are
2098 * done with this page.
2099 */
2100 map->m_lblk = block_in_file;
2101 map->m_len = last_block - block_in_file;
2102
2103 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2104 if (ret)
2105 goto out;
2106got_it:
2107 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2108 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2109 SetPageMappedToDisk(page);
2110
2df0ab04 2111 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
93770ab7 2112 DATA_GENERIC_ENHANCE_READ)) {
10f966bb 2113 ret = -EFSCORRUPTED;
95fa90c9
CY
2114 f2fs_handle_error(F2FS_I_SB(inode),
2115 ERROR_INVALID_BLKADDR);
2df0ab04
CY
2116 goto out;
2117 }
2118 } else {
2119zero_out:
2120 zero_user_segment(page, 0, PAGE_SIZE);
95ae251f
EB
2121 if (f2fs_need_verity(inode, page->index) &&
2122 !fsverity_verify_page(page)) {
2123 ret = -EIO;
2124 goto out;
2125 }
2df0ab04
CY
2126 if (!PageUptodate(page))
2127 SetPageUptodate(page);
2128 unlock_page(page);
2129 goto out;
2130 }
2131
2132 /*
2133 * This page will go to BIO. Do we need to send this
2134 * BIO off first?
2135 */
27aacd28
ST
2136 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2137 *last_block_in_bio, block_nr) ||
2138 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2df0ab04
CY
2139submit_and_realloc:
2140 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2141 bio = NULL;
2142 }
2143 if (bio == NULL) {
2144 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
0683728a 2145 is_readahead ? REQ_RAHEAD : 0, page->index,
7f59b277 2146 false);
2df0ab04
CY
2147 if (IS_ERR(bio)) {
2148 ret = PTR_ERR(bio);
2149 bio = NULL;
2150 goto out;
2151 }
2152 }
2153
2154 /*
2155 * If the page is under writeback, we need to wait for
2156 * its completion to see the correct decrypted data.
2157 */
2158 f2fs_wait_on_block_writeback(inode, block_nr);
2159
2160 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2161 goto submit_and_realloc;
2162
2163 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
34a23525
CY
2164 f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2165 F2FS_BLKSIZE);
2df0ab04
CY
2166 *last_block_in_bio = block_nr;
2167 goto out;
2df0ab04
CY
2168out:
2169 *bio_ret = bio;
2170 return ret;
2171}
2172
4c8ff709
CY
2173#ifdef CONFIG_F2FS_FS_COMPRESSION
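/*
 * Read all compressed pages of one cluster. Block addresses come from
 * the dnode, or from the read extent cache when it already covers the
 * cluster. Pages beyond EOF are zeroed and unlocked up front; each
 * remaining cpage is added to a STEP_DECOMPRESS bio so decompression
 * runs from the read completion path once every cpage has arrived.
 */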
2174int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2175 unsigned nr_pages, sector_t *last_block_in_bio,
0683728a 2176 bool is_readahead, bool for_write)
4c8ff709
CY
2177{
2178 struct dnode_of_data dn;
2179 struct inode *inode = cc->inode;
2180 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2181 struct bio *bio = *bio_ret;
2182 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2183 sector_t last_block_in_file;
43b9d4b4 2184 const unsigned blocksize = blks_to_bytes(inode, 1);
4c8ff709 2185 struct decompress_io_ctx *dic = NULL;
fe59109a 2186 struct extent_info ei = {};
94afd6d6 2187 bool from_dnode = true;
4c8ff709
CY
2188 int i;
2189 int ret = 0;
2190
2191 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2192
43b9d4b4
JK
2193 last_block_in_file = bytes_to_blks(inode,
2194 f2fs_readpage_limit(inode) + blocksize - 1);
4c8ff709
CY
2195
2196 /* get rid of pages beyond EOF */
2197 for (i = 0; i < cc->cluster_size; i++) {
2198 struct page *page = cc->rpages[i];
2199
2200 if (!page)
2201 continue;
2202 if ((sector_t)page->index >= last_block_in_file) {
2203 zero_user_segment(page, 0, PAGE_SIZE);
2204 if (!PageUptodate(page))
2205 SetPageUptodate(page);
2206 } else if (!PageUptodate(page)) {
2207 continue;
2208 }
2209 unlock_page(page);
9605f75c
JK
2210 if (for_write)
2211 put_page(page);
4c8ff709
CY
2212 cc->rpages[i] = NULL;
2213 cc->nr_rpages--;
2214 }
2215
2216 /* we are done since all pages are beyond EOF */
2217 if (f2fs_cluster_is_empty(cc))
2218 goto out;
2219
e7547dac 2220 if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
94afd6d6
CY
2221 from_dnode = false;
2222
2223 if (!from_dnode)
2224 goto skip_reading_dnode;
2225
4c8ff709
CY
2226 set_new_dnode(&dn, inode, NULL, NULL, 0);
2227 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2228 if (ret)
2229 goto out;
2230
a86d27dd 2231 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
4c8ff709 2232
94afd6d6 2233skip_reading_dnode:
4c8ff709
CY
2234 for (i = 1; i < cc->cluster_size; i++) {
2235 block_t blkaddr;
2236
94afd6d6
CY
2237 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2238 dn.ofs_in_node + i) :
2239 ei.blk + i - 1;
4c8ff709
CY
2240
2241 if (!__is_valid_data_blkaddr(blkaddr))
2242 break;
2243
2244 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2245 ret = -EFAULT;
2246 goto out_put_dnode;
2247 }
2248 cc->nr_cpages++;
94afd6d6
CY
2249
2250 if (!from_dnode && i >= ei.c_len)
2251 break;
4c8ff709
CY
2252 }
2253
2254 /* nothing to decompress */
2255 if (cc->nr_cpages == 0) {
2256 ret = 0;
2257 goto out_put_dnode;
2258 }
2259
2260 dic = f2fs_alloc_dic(cc);
2261 if (IS_ERR(dic)) {
2262 ret = PTR_ERR(dic);
2263 goto out_put_dnode;
2264 }
2265
6ce19aff 2266 for (i = 0; i < cc->nr_cpages; i++) {
4c8ff709
CY
2267 struct page *page = dic->cpages[i];
2268 block_t blkaddr;
7f59b277 2269 struct bio_post_read_ctx *ctx;
4c8ff709 2270
94afd6d6
CY
2271 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2272 dn.ofs_in_node + i + 1) :
2273 ei.blk + i;
4c8ff709 2274
6ce19aff
CY
2275 f2fs_wait_on_block_writeback(inode, blkaddr);
2276
2277 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2278 if (atomic_dec_and_test(&dic->remaining_pages))
bff139b4 2279 f2fs_decompress_cluster(dic, true);
6ce19aff
CY
2280 continue;
2281 }
2282
27aacd28
ST
2283 if (bio && (!page_is_mergeable(sbi, bio,
2284 *last_block_in_bio, blkaddr) ||
2285 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
4c8ff709
CY
2286submit_and_realloc:
2287 __submit_bio(sbi, bio, DATA);
2288 bio = NULL;
2289 }
2290
2291 if (!bio) {
2292 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2293 is_readahead ? REQ_RAHEAD : 0,
7f59b277 2294 page->index, for_write);
4c8ff709
CY
2295 if (IS_ERR(bio)) {
2296 ret = PTR_ERR(bio);
bff139b4 2297 f2fs_decompress_end_io(dic, ret, true);
4c8ff709 2298 f2fs_put_dnode(&dn);
f3494345 2299 *bio_ret = NULL;
4c8ff709
CY
2300 return ret;
2301 }
2302 }
2303
4c8ff709
CY
2304 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2305 goto submit_and_realloc;
2306
a4b68176 2307 ctx = get_post_read_ctx(bio);
7f59b277
EB
2308 ctx->enabled_steps |= STEP_DECOMPRESS;
2309 refcount_inc(&dic->refcnt);
03382f1a 2310
4c8ff709 2311 inc_page_count(sbi, F2FS_RD_DATA);
34a23525 2312 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
4c8ff709
CY
2313 *last_block_in_bio = blkaddr;
2314 }
2315
94afd6d6
CY
2316 if (from_dnode)
2317 f2fs_put_dnode(&dn);
4c8ff709
CY
2318
2319 *bio_ret = bio;
2320 return 0;
2321
2322out_put_dnode:
94afd6d6
CY
2323 if (from_dnode)
2324 f2fs_put_dnode(&dn);
4c8ff709 2325out:
7f59b277
EB
2326 for (i = 0; i < cc->cluster_size; i++) {
2327 if (cc->rpages[i]) {
2328 ClearPageUptodate(cc->rpages[i]);
7f59b277
EB
2329 unlock_page(cc->rpages[i]);
2330 }
2331 }
4c8ff709
CY
2332 *bio_ret = bio;
2333 return ret;
2334}
2335#endif
2336
f1e88660
JK
2337/*
2338 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2339 * The major change is that f2fs uses block_size == page_size by default.
2340 */
e20a7693 2341static int f2fs_mpage_readpages(struct inode *inode,
23323196 2342 struct readahead_control *rac, struct page *page)
f1e88660
JK
2343{
2344 struct bio *bio = NULL;
f1e88660 2345 sector_t last_block_in_bio = 0;
f1e88660 2346 struct f2fs_map_blocks map;
4c8ff709
CY
2347#ifdef CONFIG_F2FS_FS_COMPRESSION
2348 struct compress_ctx cc = {
2349 .inode = inode,
2350 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2351 .cluster_size = F2FS_I(inode)->i_cluster_size,
2352 .cluster_idx = NULL_CLUSTER,
2353 .rpages = NULL,
2354 .cpages = NULL,
2355 .nr_rpages = 0,
2356 .nr_cpages = 0,
2357 };
a2649315 2358 pgoff_t nc_cluster_idx = NULL_CLUSTER;
4c8ff709 2359#endif
23323196 2360 unsigned nr_pages = rac ? readahead_count(rac) : 1;
4c8ff709 2361 unsigned max_nr_pages = nr_pages;
2df0ab04 2362 int ret = 0;
f1e88660
JK
2363
2364 map.m_pblk = 0;
2365 map.m_lblk = 0;
2366 map.m_len = 0;
2367 map.m_flags = 0;
da85985c 2368 map.m_next_pgofs = NULL;
c4020b2d 2369 map.m_next_extent = NULL;
d5097be5 2370 map.m_seg_type = NO_CHECK_TYPE;
f9d6d059 2371 map.m_may_create = false;
f1e88660 2372
736c0a74 2373 for (; nr_pages; nr_pages--) {
23323196
MWO
2374 if (rac) {
2375 page = readahead_page(rac);
a83d50bc 2376 prefetchw(&page->flags);
f1e88660
JK
2377 }
2378
4c8ff709
CY
2379#ifdef CONFIG_F2FS_FS_COMPRESSION
2380 if (f2fs_compressed_file(inode)) {
2381 /* there are remaining compressed pages, submit them */
2382 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2383 ret = f2fs_read_multi_pages(&cc, &bio,
2384 max_nr_pages,
2385 &last_block_in_bio,
23323196 2386 rac != NULL, false);
8bfbfb0d 2387 f2fs_destroy_compress_ctx(&cc, false);
4c8ff709
CY
2388 if (ret)
2389 goto set_error_page;
2390 }
a2649315
FC
2391 if (cc.cluster_idx == NULL_CLUSTER) {
2392 if (nc_cluster_idx ==
2393 page->index >> cc.log_cluster_size) {
2394 goto read_single_page;
2395 }
4c8ff709 2396
a2649315
FC
2397 ret = f2fs_is_compressed_cluster(inode, page->index);
2398 if (ret < 0)
2399 goto set_error_page;
2400 else if (!ret) {
2401 nc_cluster_idx =
2402 page->index >> cc.log_cluster_size;
2403 goto read_single_page;
2404 }
4c8ff709 2405
a2649315
FC
2406 nc_cluster_idx = NULL_CLUSTER;
2407 }
4c8ff709
CY
2408 ret = f2fs_init_compress_ctx(&cc);
2409 if (ret)
2410 goto set_error_page;
2411
2412 f2fs_compress_ctx_add_page(&cc, page);
2413
2414 goto next_page;
2415 }
2416read_single_page:
2417#endif
2418
2419 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
23323196 2420 &bio, &last_block_in_bio, rac);
2df0ab04 2421 if (ret) {
4c8ff709
CY
2422#ifdef CONFIG_F2FS_FS_COMPRESSION
2423set_error_page:
2424#endif
09cbfeaf 2425 zero_user_segment(page, 0, PAGE_SIZE);
f1e88660 2426 unlock_page(page);
f1e88660 2427 }
23323196 2428#ifdef CONFIG_F2FS_FS_COMPRESSION
f1e88660 2429next_page:
23323196
MWO
2430#endif
2431 if (rac)
09cbfeaf 2432 put_page(page);
4c8ff709
CY
2433
2434#ifdef CONFIG_F2FS_FS_COMPRESSION
2435 if (f2fs_compressed_file(inode)) {
2436 /* last page */
2437 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2438 ret = f2fs_read_multi_pages(&cc, &bio,
2439 max_nr_pages,
2440 &last_block_in_bio,
23323196 2441 rac != NULL, false);
8bfbfb0d 2442 f2fs_destroy_compress_ctx(&cc, false);
4c8ff709
CY
2443 }
2444 }
2445#endif
f1e88660 2446 }
f1e88660 2447 if (bio)
4fc29c1a 2448 __submit_bio(F2FS_I_SB(inode), bio, DATA);
23323196 2449 return ret;
f1e88660
JK
2450}
2451
be05584f 2452static int f2fs_read_data_folio(struct file *file, struct folio *folio)
eb47b800 2453{
be05584f 2454 struct page *page = &folio->page;
4969c06a 2455 struct inode *inode = page_file_mapping(page)->host;
b3d208f9 2456 int ret = -EAGAIN;
9ffe0fb5 2457
c20e89cd
CY
2458 trace_f2fs_readpage(page, DATA);
2459
4c8ff709
CY
2460 if (!f2fs_is_compress_backend_ready(inode)) {
2461 unlock_page(page);
2462 return -EOPNOTSUPP;
2463 }
2464
e1c42045 2465 /* If the file has inline data, try to read it directly */
9ffe0fb5
HL
2466 if (f2fs_has_inline_data(inode))
2467 ret = f2fs_read_inline_data(inode, page);
b3d208f9 2468 if (ret == -EAGAIN)
e20a7693 2469 ret = f2fs_mpage_readpages(inode, NULL, page);
9ffe0fb5 2470 return ret;
eb47b800
JK
2471}
2472
23323196 2473static void f2fs_readahead(struct readahead_control *rac)
eb47b800 2474{
23323196 2475 struct inode *inode = rac->mapping->host;
b8c29400 2476
23323196 2477 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
9ffe0fb5 2478
4c8ff709 2479 if (!f2fs_is_compress_backend_ready(inode))
23323196 2480 return;
4c8ff709 2481
704528d8 2482 /* If the file has inline data, skip readahead */
9ffe0fb5 2483 if (f2fs_has_inline_data(inode))
23323196 2484 return;
9ffe0fb5 2485
e20a7693 2486 f2fs_mpage_readpages(inode, rac, NULL);
eb47b800
JK
2487}
2488
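/*
 * Encrypt one data page before writeback. Inline (hardware) crypto
 * needs no bounce page, so return early in that case; otherwise
 * encrypt into a bounce page, retrying with __GFP_NOFAIL after
 * flushing merged writes if the first attempt hits ENOMEM. Any cached
 * copy of the old block in META_MAPPING is updated to match.
 */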
4c8ff709 2489int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
7eab0c0d
HP
2490{
2491 struct inode *inode = fio->page->mapping->host;
4c8ff709 2492 struct page *mpage, *page;
7eab0c0d
HP
2493 gfp_t gfp_flags = GFP_NOFS;
2494
1958593e 2495 if (!f2fs_encrypted_file(inode))
7eab0c0d
HP
2496 return 0;
2497
4c8ff709
CY
2498 page = fio->compressed_page ? fio->compressed_page : fio->page;
2499
6dbb1796 2500 /* wait for GCed page writeback via META_MAPPING */
0ded69f6 2501 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
7eab0c0d 2502
27aacd28
ST
2503 if (fscrypt_inode_uses_inline_crypto(inode))
2504 return 0;
2505
7eab0c0d 2506retry_encrypt:
4c8ff709
CY
2507 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2508 PAGE_SIZE, 0, gfp_flags);
6aa58d8a
CY
2509 if (IS_ERR(fio->encrypted_page)) {
2510 /* flush pending IOs and wait for a while in the ENOMEM case */
2511 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2512 f2fs_flush_merged_writes(fio->sbi);
4034247a 2513 memalloc_retry_wait(GFP_NOFS);
6aa58d8a
CY
2514 gfp_flags |= __GFP_NOFAIL;
2515 goto retry_encrypt;
2516 }
2517 return PTR_ERR(fio->encrypted_page);
2518 }
7eab0c0d 2519
6aa58d8a
CY
2520 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2521 if (mpage) {
2522 if (PageUptodate(mpage))
2523 memcpy(page_address(mpage),
2524 page_address(fio->encrypted_page), PAGE_SIZE);
2525 f2fs_put_page(mpage, 1);
7eab0c0d 2526 }
6aa58d8a 2527 return 0;
7eab0c0d
HP
2528}
2529
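/*
 * Evaluate the in-place-update policy bitmask (SM_I(sbi)->ipu_policy):
 * HONOR_OPU_WRITE vetoes IPU for inodes flagged FI_OPU_WRITE, FORCE
 * always allows it, SSR/UTIL/SSR_UTIL tie it to SSR need and disk
 * utilization, ASYNC permits it for non-sync rewrites of unencrypted
 * data, FSYNC covers fdatasync-marked inodes, and non-checkpointed
 * blocks are updated in place while checkpointing is disabled.
 */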
bb9e3bb8
CY
2530static inline bool check_inplace_update_policy(struct inode *inode,
2531 struct f2fs_io_info *fio)
7eab0c0d 2532{
bb9e3bb8
CY
2533 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2534 unsigned int policy = SM_I(sbi)->ipu_policy;
7eab0c0d 2535
1018a546
CY
2536 if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) &&
2537 is_inode_flag_set(inode, FI_OPU_WRITE))
2538 return false;
bb9e3bb8
CY
2539 if (policy & (0x1 << F2FS_IPU_FORCE))
2540 return true;
4d57b86d 2541 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
bb9e3bb8
CY
2542 return true;
2543 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2544 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2545 return true;
4d57b86d 2546 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
bb9e3bb8
CY
2547 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2548 return true;
2549
2550 /*
2551 * allow IPU when rewriting async pages
2552 */
2553 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2554 fio && fio->op == REQ_OP_WRITE &&
2555 !(fio->op_flags & REQ_SYNC) &&
62230e0d 2556 !IS_ENCRYPTED(inode))
bb9e3bb8
CY
2557 return true;
2558
2559 /* this is only set during fdatasync */
2560 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2561 is_inode_flag_set(inode, FI_NEED_IPU))
2562 return true;
2563
4354994f
DR
2564 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2565 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2566 return true;
2567
bb9e3bb8
CY
2568 return false;
2569}
2570
4d57b86d 2571bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
bb9e3bb8 2572{
859fca6b
CY
2573 /* swap file is migrating in aligned write mode */
2574 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2575 return false;
2576
1ad71a27
JK
2577 if (f2fs_is_pinned_file(inode))
2578 return true;
bb9e3bb8
CY
2579
2580 /* if this is a cold file, we should overwrite to avoid fragmentation */
f3b23c78 2581 if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
bb9e3bb8
CY
2582 return true;
2583
2584 return check_inplace_update_policy(inode, fio);
2585}
2586
4d57b86d 2587bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
bb9e3bb8
CY
2588{
2589 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2590
19bdba52
JK
2591 /* The below cases were checked when setting it. */
2592 if (f2fs_is_pinned_file(inode))
2593 return false;
2594 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2595 return true;
b0332a0f 2596 if (f2fs_lfs_mode(sbi))
bb9e3bb8
CY
2597 return true;
2598 if (S_ISDIR(inode->i_mode))
2599 return true;
af033b2a
CY
2600 if (IS_NOQUOTA(inode))
2601 return true;
bb9e3bb8
CY
2602 if (f2fs_is_atomic_file(inode))
2603 return true;
859fca6b
CY
2604
2605 /* swap file is migrating in aligned write mode */
2606 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2607 return true;
2608
1018a546
CY
2609 if (is_inode_flag_set(inode, FI_OPU_WRITE))
2610 return true;
2611
bb9e3bb8 2612 if (fio) {
b763f3be 2613 if (page_private_gcing(fio->page))
bb9e3bb8 2614 return true;
b763f3be 2615 if (page_private_dummy(fio->page))
bb9e3bb8 2616 return true;
4354994f
DR
2617 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2618 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2619 return true;
bb9e3bb8
CY
2620 }
2621 return false;
2622}
2623
7eab0c0d
HP
2624static inline bool need_inplace_update(struct f2fs_io_info *fio)
2625{
2626 struct inode *inode = fio->page->mapping->host;
2627
4d57b86d 2628 if (f2fs_should_update_outplace(inode, fio))
7eab0c0d
HP
2629 return false;
2630
4d57b86d 2631 return f2fs_should_update_inplace(inode, fio);
7eab0c0d
HP
2632}
2633
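/*
 * Write one data page either in place (IPU) or out of place (OPU/LFS).
 * The read extent cache provides a fast path straight to an IPU write;
 * atomic files redirect the dnode lookup to their COW inode.
 * fio->need_lock selects the f2fs_lock_op() strategy (LOCK_REQ,
 * LOCK_RETRY or LOCK_DONE).
 */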
4d57b86d 2634int f2fs_do_write_data_page(struct f2fs_io_info *fio)
eb47b800 2635{
05ca3632 2636 struct page *page = fio->page;
eb47b800 2637 struct inode *inode = page->mapping->host;
eb47b800 2638 struct dnode_of_data dn;
94afd6d6 2639 struct extent_info ei = {0, };
7735730d 2640 struct node_info ni;
e959c8f5 2641 bool ipu_force = false;
eb47b800
JK
2642 int err = 0;
2643
3db1de0e
DJ
2644 /* Use COW inode to make dnode_of_data for atomic write */
2645 if (f2fs_is_atomic_file(inode))
2646 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2647 else
2648 set_new_dnode(&dn, inode, NULL, NULL, 0);
2649
e959c8f5 2650 if (need_inplace_update(fio) &&
e7547dac 2651 f2fs_lookup_read_extent_cache(inode, page->index, &ei)) {
e959c8f5 2652 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
a817737e 2653
c9b60788 2654 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
95fa90c9
CY
2655 DATA_GENERIC_ENHANCE)) {
2656 f2fs_handle_error(fio->sbi,
2657 ERROR_INVALID_BLKADDR);
10f966bb 2658 return -EFSCORRUPTED;
95fa90c9 2659 }
c9b60788
CY
2660
2661 ipu_force = true;
2662 fio->need_lock = LOCK_DONE;
2663 goto got_it;
e959c8f5 2664 }
279d6df2 2665
d29460e5
JK
2666 /* Deadlock due to between page->lock and f2fs_lock_op */
2667 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2668 return -EAGAIN;
279d6df2 2669
4d57b86d 2670 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
eb47b800 2671 if (err)
279d6df2 2672 goto out;
eb47b800 2673
28bc106b 2674 fio->old_blkaddr = dn.data_blkaddr;
eb47b800
JK
2675
2676 /* This page is already truncated */
7a9d7548 2677 if (fio->old_blkaddr == NULL_ADDR) {
2bca1e23 2678 ClearPageUptodate(page);
b763f3be 2679 clear_page_private_gcing(page);
eb47b800 2680 goto out_writepage;
2bca1e23 2681 }
e959c8f5 2682got_it:
c9b60788
CY
2683 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2684 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
93770ab7 2685 DATA_GENERIC_ENHANCE)) {
10f966bb 2686 err = -EFSCORRUPTED;
95fa90c9 2687 f2fs_handle_error(fio->sbi, ERROR_INVALID_BLKADDR);
c9b60788
CY
2688 goto out_writepage;
2689 }
3db1de0e 2690
eb47b800
JK
2691 /*
2692 * If the current allocation needs SSR,
2693 * it is better to do in-place writes for the updated data.
2694 */
93770ab7
CY
2695 if (ipu_force ||
2696 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
7b525dd0 2697 need_inplace_update(fio))) {
4c8ff709 2698 err = f2fs_encrypt_one_page(fio);
cc15620b
JK
2699 if (err)
2700 goto out_writepage;
2701
2702 set_page_writeback(page);
17c50035 2703 ClearPageError(page);
279d6df2 2704 f2fs_put_dnode(&dn);
cc15620b 2705 if (fio->need_lock == LOCK_REQ)
279d6df2 2706 f2fs_unlock_op(fio->sbi);
4d57b86d 2707 err = f2fs_inplace_write_data(fio);
6492a335 2708 if (err) {
27aacd28 2709 if (fscrypt_inode_uses_fs_layer_crypto(inode))
d2d0727b 2710 fscrypt_finalize_bounce_page(&fio->encrypted_page);
6492a335
CY
2711 if (PageWriteback(page))
2712 end_page_writeback(page);
cd23ffa9
CY
2713 } else {
2714 set_inode_flag(inode, FI_UPDATE_WRITE);
6492a335 2715 }
7eab0c0d 2716 trace_f2fs_do_write_data_page(fio->page, IPU);
279d6df2 2717 return err;
eb47b800 2718 }
279d6df2 2719
cc15620b
JK
2720 if (fio->need_lock == LOCK_RETRY) {
2721 if (!f2fs_trylock_op(fio->sbi)) {
2722 err = -EAGAIN;
2723 goto out_writepage;
2724 }
2725 fio->need_lock = LOCK_REQ;
2726 }
2727
a9419b63 2728 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
7735730d
CY
2729 if (err)
2730 goto out_writepage;
2731
2732 fio->version = ni.version;
2733
4c8ff709 2734 err = f2fs_encrypt_one_page(fio);
cc15620b
JK
2735 if (err)
2736 goto out_writepage;
2737
2738 set_page_writeback(page);
17c50035 2739 ClearPageError(page);
cc15620b 2740
4c8ff709
CY
2741 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2742 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2743
279d6df2 2744 /* LFS mode write path */
4d57b86d 2745 f2fs_outplace_write_data(&dn, fio);
279d6df2
HP
2746 trace_f2fs_do_write_data_page(page, OPU);
2747 set_inode_flag(inode, FI_APPEND_WRITE);
2748 if (page->index == 0)
2749 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
eb47b800
JK
2750out_writepage:
2751 f2fs_put_dnode(&dn);
279d6df2 2752out:
cc15620b 2753 if (fio->need_lock == LOCK_REQ)
279d6df2 2754 f2fs_unlock_op(fio->sbi);
eb47b800
JK
2755 return err;
2756}
2757
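/*
 * Write back a single page. The tail page of the file is zero-padded
 * past EOF, inline data is written through the inode block, and
 * dentry/quota pages go out under checkpoint control with LOCK_DONE.
 * Returns 0 on success, or AOP_WRITEPAGE_ACTIVATE when the page was
 * redirtied and left locked.
 */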
4c8ff709 2758int f2fs_write_single_data_page(struct page *page, int *submitted,
8648de2c
CY
2759 struct bio **bio,
2760 sector_t *last_block,
b0af6d49 2761 struct writeback_control *wbc,
4c8ff709 2762 enum iostat_type io_type,
3afae09f
CY
2763 int compr_blocks,
2764 bool allow_balance)
eb47b800
JK
2765{
2766 struct inode *inode = page->mapping->host;
4081363f 2767 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
eb47b800 2768 loff_t i_size = i_size_read(inode);
4c8ff709 2769 const pgoff_t end_index = ((unsigned long long)i_size)
09cbfeaf 2770 >> PAGE_SHIFT;
1f0d5c91 2771 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
9ffe0fb5 2772 unsigned offset = 0;
39936837 2773 bool need_balance_fs = false;
eb47b800 2774 int err = 0;
458e6197 2775 struct f2fs_io_info fio = {
05ca3632 2776 .sbi = sbi,
39d787be 2777 .ino = inode->i_ino,
458e6197 2778 .type = DATA,
04d328de 2779 .op = REQ_OP_WRITE,
7637241e 2780 .op_flags = wbc_to_write_flags(wbc),
e959c8f5 2781 .old_blkaddr = NULL_ADDR,
05ca3632 2782 .page = page,
4375a336 2783 .encrypted_page = NULL,
d68f735b 2784 .submitted = false,
4c8ff709 2785 .compr_blocks = compr_blocks,
cc15620b 2786 .need_lock = LOCK_RETRY,
0d5b9d81 2787 .post_read = f2fs_post_read_required(inode),
b0af6d49 2788 .io_type = io_type,
578c6478 2789 .io_wbc = wbc,
8648de2c
CY
2790 .bio = bio,
2791 .last_block = last_block,
458e6197 2792 };
eb47b800 2793
ecda0de3
CY
2794 trace_f2fs_writepage(page, DATA);
2795
db198ae0
CY
2796 /* we should bypass data pages to let the kworker jobs proceed */
2797 if (unlikely(f2fs_cp_error(sbi))) {
2798 mapping_set_error(page->mapping, -EIO);
1174abfd
CY
2799 /*
2800 * don't drop any dirty dentry pages, to keep the latest
2801 * directory structure.
2802 */
2803 if (S_ISDIR(inode->i_mode))
2804 goto redirty_out;
db198ae0
CY
2805 goto out;
2806 }
2807
0771fcc7
CY
2808 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2809 goto redirty_out;
2810
4c8ff709
CY
2811 if (page->index < end_index ||
2812 f2fs_verity_in_progress(inode) ||
2813 compr_blocks)
39936837 2814 goto write;
eb47b800
JK
2815
2816 /*
2817 * If the offset is beyond the end of the file,
2818 * this page does not have to be written to disk.
2819 */
09cbfeaf 2820 offset = i_size & (PAGE_SIZE - 1);
76f60268 2821 if ((page->index >= end_index + 1) || !offset)
39936837 2822 goto out;
eb47b800 2823
09cbfeaf 2824 zero_user_segment(page, offset, PAGE_SIZE);
39936837 2825write:
1e84371f
JK
2826 if (f2fs_is_drop_cache(inode))
2827 goto out;
eb47b800 2828
435cbab9
JK
2829 /* Dentry/quota blocks are controlled by checkpoint */
2830 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
79963d96
CY
2831 /*
2832 * We need to wait for node_write to avoid block allocation during
2833 * checkpoint. This can only happen to quota writes, which could
2834 * otherwise race with discard.
2835 */
2836 if (IS_NOQUOTA(inode))
e4544b63 2837 f2fs_down_read(&sbi->node_write);
79963d96 2838
cc15620b 2839 fio.need_lock = LOCK_DONE;
4d57b86d 2840 err = f2fs_do_write_data_page(&fio);
79963d96
CY
2841
2842 if (IS_NOQUOTA(inode))
e4544b63 2843 f2fs_up_read(&sbi->node_write);
79963d96 2844
8618b881
JK
2845 goto done;
2846 }
9ffe0fb5 2847
8618b881 2848 if (!wbc->for_reclaim)
39936837 2849 need_balance_fs = true;
7f3037a5 2850 else if (has_not_enough_free_secs(sbi, 0, 0))
39936837 2851 goto redirty_out;
ef095d19
JK
2852 else
2853 set_inode_flag(inode, FI_HOT_DATA);
eb47b800 2854
b3d208f9 2855 err = -EAGAIN;
dd7b2333 2856 if (f2fs_has_inline_data(inode)) {
b3d208f9 2857 err = f2fs_write_inline_data(inode, page);
dd7b2333
YH
2858 if (!err)
2859 goto out;
2860 }
279d6df2 2861
cc15620b 2862 if (err == -EAGAIN) {
4d57b86d 2863 err = f2fs_do_write_data_page(&fio);
cc15620b
JK
2864 if (err == -EAGAIN) {
2865 fio.need_lock = LOCK_REQ;
4d57b86d 2866 err = f2fs_do_write_data_page(&fio);
cc15620b
JK
2867 }
2868 }
a0d00fad 2869
eb449797
CY
2870 if (err) {
2871 file_set_keep_isize(inode);
2872 } else {
c10c9820 2873 spin_lock(&F2FS_I(inode)->i_size_lock);
eb449797
CY
2874 if (F2FS_I(inode)->last_disk_size < psize)
2875 F2FS_I(inode)->last_disk_size = psize;
c10c9820 2876 spin_unlock(&F2FS_I(inode)->i_size_lock);
eb449797 2877 }
279d6df2 2878
8618b881
JK
2879done:
2880 if (err && err != -ENOENT)
2881 goto redirty_out;
eb47b800 2882
39936837 2883out:
a7ffdbe2 2884 inode_dec_dirty_pages(inode);
2baf0781 2885 if (err) {
2bca1e23 2886 ClearPageUptodate(page);
b763f3be 2887 clear_page_private_gcing(page);
2baf0781 2888 }
0c3a5797
CY
2889
2890 if (wbc->for_reclaim) {
bab475c5 2891 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
ef095d19 2892 clear_inode_flag(inode, FI_HOT_DATA);
4d57b86d 2893 f2fs_remove_dirty_inode(inode);
d68f735b 2894 submitted = NULL;
0c3a5797 2895 }
eb47b800 2896 unlock_page(page);
186857c5 2897 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
d80afefb 2898 !F2FS_I(inode)->wb_task && allow_balance)
a7881893 2899 f2fs_balance_fs(sbi, need_balance_fs);
0c3a5797 2900
d68f735b 2901 if (unlikely(f2fs_cp_error(sbi))) {
b9109b0e 2902 f2fs_submit_merged_write(sbi, DATA);
0b20fcec 2903 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
d68f735b
JK
2904 submitted = NULL;
2905 }
2906
2907 if (submitted)
4c8ff709 2908 *submitted = fio.submitted ? 1 : 0;
0c3a5797 2909
eb47b800
JK
2910 return 0;
2911
eb47b800 2912redirty_out:
76f60268 2913 redirty_page_for_writepage(wbc, page);
5b19d284
JK
2914 /*
2915 * pageout() in MM translates EAGAIN, so it calls handle_write_error()
2916 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2917 * file_write_and_wait_range() will then see the EIO error, which is
2918 * critical for fsync() to report the atomic_write failure to the user.
2919 */
2920 if (!err || wbc->for_reclaim)
0002b61b 2921 return AOP_WRITEPAGE_ACTIVATE;
b230e6ca
JK
2922 unlock_page(page);
2923 return err;
fa9150a8
NJ
2924}
2925
f566bae8
JK
2926static int f2fs_write_data_page(struct page *page,
2927 struct writeback_control *wbc)
2928{
4c8ff709
CY
2929#ifdef CONFIG_F2FS_FS_COMPRESSION
2930 struct inode *inode = page->mapping->host;
2931
2932 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2933 goto out;
2934
2935 if (f2fs_compressed_file(inode)) {
2936 if (f2fs_is_compressed_cluster(inode, page->index)) {
2937 redirty_page_for_writepage(wbc, page);
2938 return AOP_WRITEPAGE_ACTIVATE;
2939 }
2940 }
2941out:
2942#endif
2943
2944 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
3afae09f 2945 wbc, FS_DATA_IO, 0, true);
f566bae8
JK
2946}
2947
8f46dcae
CY
2948/*
2949 * This function was copied from write_cache_pages() in mm/page-writeback.c.
2950 * The major change is that it writes cold data pages separately from
2951 * warm/hot data pages.
2952 */
2953static int f2fs_write_cache_pages(struct address_space *mapping,
b0af6d49
CY
2954 struct writeback_control *wbc,
2955 enum iostat_type io_type)
8f46dcae
CY
2956{
2957 int ret = 0;
4c8ff709 2958 int done = 0, retry = 0;
01fc4b9a 2959 struct page *pages[F2FS_ONSTACK_PAGES];
c29fd0c0 2960 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
8648de2c
CY
2961 struct bio *bio = NULL;
2962 sector_t last_block;
4c8ff709
CY
2963#ifdef CONFIG_F2FS_FS_COMPRESSION
2964 struct inode *inode = mapping->host;
2965 struct compress_ctx cc = {
2966 .inode = inode,
2967 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2968 .cluster_size = F2FS_I(inode)->i_cluster_size,
2969 .cluster_idx = NULL_CLUSTER,
2970 .rpages = NULL,
2971 .nr_rpages = 0,
2972 .cpages = NULL,
3271d7eb 2973 .valid_nr_cpages = 0,
4c8ff709
CY
2974 .rbuf = NULL,
2975 .cbuf = NULL,
2976 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2977 .private = NULL,
2978 };
2979#endif
8f46dcae 2980 int nr_pages;
8f46dcae
CY
2981 pgoff_t index;
2982 pgoff_t end; /* Inclusive */
2983 pgoff_t done_index;
8f46dcae 2984 int range_whole = 0;
10bbd235 2985 xa_mark_t tag;
bab475c5 2986 int nwritten = 0;
4c8ff709
CY
2987 int submitted = 0;
2988 int i;
8f46dcae 2989
ef095d19
JK
2990 if (get_dirty_pages(mapping->host) <=
2991 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2992 set_inode_flag(mapping->host, FI_HOT_DATA);
2993 else
2994 clear_inode_flag(mapping->host, FI_HOT_DATA);
2995
8f46dcae 2996 if (wbc->range_cyclic) {
4df7a75f 2997 index = mapping->writeback_index; /* prev offset */
8f46dcae
CY
2998 end = -1;
2999 } else {
09cbfeaf
KS
3000 index = wbc->range_start >> PAGE_SHIFT;
3001 end = wbc->range_end >> PAGE_SHIFT;
8f46dcae
CY
3002 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3003 range_whole = 1;
8f46dcae
CY
3004 }
3005 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3006 tag = PAGECACHE_TAG_TOWRITE;
3007 else
3008 tag = PAGECACHE_TAG_DIRTY;
3009retry:
4c8ff709 3010 retry = 0;
8f46dcae
CY
3011 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3012 tag_pages_for_writeback(mapping, index, end);
3013 done_index = index;
4c8ff709 3014 while (!done && !retry && (index <= end)) {
01fc4b9a
FC
3015 nr_pages = find_get_pages_range_tag(mapping, &index, end,
3016 tag, F2FS_ONSTACK_PAGES, pages);
8f46dcae
CY
3017 if (nr_pages == 0)
3018 break;
3019
3020 for (i = 0; i < nr_pages; i++) {
01fc4b9a 3021 struct page *page = pages[i];
4c8ff709
CY
3022 bool need_readd;
3023readd:
3024 need_readd = false;
3025#ifdef CONFIG_F2FS_FS_COMPRESSION
3026 if (f2fs_compressed_file(inode)) {
b368cc5e
FC
3027 void *fsdata = NULL;
3028 struct page *pagep;
3029 int ret2;
3030
4c8ff709
CY
3031 ret = f2fs_init_compress_ctx(&cc);
3032 if (ret) {
3033 done = 1;
3034 break;
3035 }
3036
3037 if (!f2fs_cluster_can_merge_page(&cc,
3038 page->index)) {
3039 ret = f2fs_write_multi_pages(&cc,
3040 &submitted, wbc, io_type);
3041 if (!ret)
3042 need_readd = true;
3043 goto result;
3044 }
8f46dcae 3045
4c8ff709
CY
3046 if (unlikely(f2fs_cp_error(sbi)))
3047 goto lock_page;
3048
b368cc5e
FC
3049 if (!f2fs_cluster_is_empty(&cc))
3050 goto lock_page;
4c8ff709 3051
4f8219f8 3052 if (f2fs_all_cluster_page_ready(&cc,
01fc4b9a 3053 pages, i, nr_pages, true))
4f8219f8
FC
3054 goto lock_page;
3055
b368cc5e 3056 ret2 = f2fs_prepare_compress_overwrite(
4c8ff709
CY
3057 inode, &pagep,
3058 page->index, &fsdata);
b368cc5e
FC
3059 if (ret2 < 0) {
3060 ret = ret2;
3061 done = 1;
3062 break;
3063 } else if (ret2 &&
3064 (!f2fs_compress_write_end(inode,
3065 fsdata, page->index, 1) ||
4f8219f8 3066 !f2fs_all_cluster_page_ready(&cc,
01fc4b9a 3067 pages, i, nr_pages, false))) {
b368cc5e
FC
3068 retry = 1;
3069 break;
4c8ff709
CY
3070 }
3071 }
3072#endif
f8de4331 3073 /* give priority to WB_SYNC threads */
c29fd0c0 3074 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
f8de4331
CY
3075 wbc->sync_mode == WB_SYNC_NONE) {
3076 done = 1;
3077 break;
3078 }
4c8ff709
CY
3079#ifdef CONFIG_F2FS_FS_COMPRESSION
3080lock_page:
3081#endif
8f46dcae 3082 done_index = page->index;
d29460e5 3083retry_write:
8f46dcae
CY
3084 lock_page(page);
3085
3086 if (unlikely(page->mapping != mapping)) {
3087continue_unlock:
3088 unlock_page(page);
3089 continue;
3090 }
3091
3092 if (!PageDirty(page)) {
3093 /* someone wrote it for us */
3094 goto continue_unlock;
3095 }
3096
8f46dcae 3097 if (PageWriteback(page)) {
0b20fcec 3098 if (wbc->sync_mode != WB_SYNC_NONE)
fec1d657 3099 f2fs_wait_on_page_writeback(page,
bae0ee7a 3100 DATA, true, true);
0b20fcec 3101 else
8f46dcae
CY
3102 goto continue_unlock;
3103 }
3104
8f46dcae
CY
3105 if (!clear_page_dirty_for_io(page))
3106 goto continue_unlock;
3107
4c8ff709
CY
3108#ifdef CONFIG_F2FS_FS_COMPRESSION
3109 if (f2fs_compressed_file(inode)) {
3110 get_page(page);
3111 f2fs_compress_ctx_add_page(&cc, page);
3112 continue;
3113 }
3114#endif
3115 ret = f2fs_write_single_data_page(page, &submitted,
3afae09f
CY
3116 &bio, &last_block, wbc, io_type,
3117 0, true);
4c8ff709
CY
3118 if (ret == AOP_WRITEPAGE_ACTIVATE)
3119 unlock_page(page);
3120#ifdef CONFIG_F2FS_FS_COMPRESSION
3121result:
3122#endif
3123 nwritten += submitted;
3124 wbc->nr_to_write -= submitted;
3125
8f46dcae 3126 if (unlikely(ret)) {
0002b61b
CY
3127 /*
3128 * keep nr_to_write, since vfs uses this to
3129 * get # of written pages.
3130 */
3131 if (ret == AOP_WRITEPAGE_ACTIVATE) {
0002b61b 3132 ret = 0;
4c8ff709 3133 goto next;
d29460e5
JK
3134 } else if (ret == -EAGAIN) {
3135 ret = 0;
3136 if (wbc->sync_mode == WB_SYNC_ALL) {
a64239d0 3137 f2fs_io_schedule_timeout(
5df7731f 3138 DEFAULT_IO_TIMEOUT);
d29460e5
JK
3139 goto retry_write;
3140 }
4c8ff709 3141 goto next;
0002b61b 3142 }
b230e6ca
JK
3143 done_index = page->index + 1;
3144 done = 1;
3145 break;
8f46dcae
CY
3146 }
3147
4c8ff709 3148 if (wbc->nr_to_write <= 0 &&
687de7f1 3149 wbc->sync_mode == WB_SYNC_NONE) {
8f46dcae
CY
3150 done = 1;
3151 break;
3152 }
4c8ff709
CY
3153next:
3154 if (need_readd)
3155 goto readd;
8f46dcae 3156 }
01fc4b9a 3157 release_pages(pages, nr_pages);
8f46dcae
CY
3158 cond_resched();
3159 }
4c8ff709
CY
3160#ifdef CONFIG_F2FS_FS_COMPRESSION
3161 /* flush the remaining pages in the compress cluster */
3162 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3163 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3164 nwritten += submitted;
3165 wbc->nr_to_write -= submitted;
3166 if (ret) {
3167 done = 1;
3168 retry = 0;
3169 }
3170 }
adfc6943 3171 if (f2fs_compressed_file(inode))
8bfbfb0d 3172 f2fs_destroy_compress_ctx(&cc, false);
4c8ff709 3173#endif
e78790f8 3174 if (retry) {
8f46dcae 3175 index = 0;
e78790f8 3176 end = -1;
8f46dcae
CY
3177 goto retry;
3178 }
e78790f8
ST
3179 if (wbc->range_cyclic && !done)
3180 done_index = 0;
8f46dcae
CY
3181 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3182 mapping->writeback_index = done_index;
3183
bab475c5 3184 if (nwritten)
b9109b0e 3185 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
bab475c5 3186 NULL, 0, DATA);
8648de2c
CY
3187 /* submit cached bio of IPU write */
3188 if (bio)
0b20fcec 3189 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
6ca56ca4 3190
8f46dcae
CY
3191 return ret;
3192}
3193
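/*
 * Decide whether this writeback pass must be serialized under
 * sbi->writepages: compressed data, non-WB_SYNC_ALL passes and inodes
 * with many dirty pages are funneled through the mutex to keep their
 * IOs contiguous, while the data flush task itself skips it to avoid
 * deadlock.
 */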
853137ce
JK
3194static inline bool __should_serialize_io(struct inode *inode,
3195 struct writeback_control *wbc)
3196{
b13f67ff 3197 /* to avoid deadlock in the data flush path */
d80afefb 3198 if (F2FS_I(inode)->wb_task)
b13f67ff
CY
3199 return false;
3200
853137ce
JK
3201 if (!S_ISREG(inode->i_mode))
3202 return false;
af033b2a
CY
3203 if (IS_NOQUOTA(inode))
3204 return false;
b13f67ff 3205
602a16d5 3206 if (f2fs_need_compress_data(inode))
b13f67ff 3207 return true;
853137ce
JK
3208 if (wbc->sync_mode != WB_SYNC_ALL)
3209 return true;
3210 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3211 return true;
3212 return false;
3213}
3214
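/*
 * Common ->writepages body. Cheap skips first (no dirty pages, POR in
 * progress, small non-sync dentry/quota flushes), then the pass is
 * optionally serialized per __should_serialize_io() and run inside a
 * block plug. WB_SYNC_ALL passes bump wb_sync_req so concurrent
 * WB_SYNC_NONE writeback backs off instead of splitting the IO.
 */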
fc99fe27 3215static int __f2fs_write_data_pages(struct address_space *mapping,
b0af6d49
CY
3216 struct writeback_control *wbc,
3217 enum iostat_type io_type)
eb47b800
JK
3218{
3219 struct inode *inode = mapping->host;
4081363f 3220 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
9dfa1baf 3221 struct blk_plug plug;
eb47b800 3222 int ret;
853137ce 3223 bool locked = false;
eb47b800 3224
cfb185a1 3225 /* deal with chardevs and other special files */
3226 if (!mapping->a_ops->writepage)
3227 return 0;
3228
6a290544
CY
3229 /* skip writing if there is no dirty page in this inode */
3230 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3231 return 0;
3232
0771fcc7
CY
3233 /* during POR, we don't need to trigger writepage at all. */
3234 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3235 goto skip_write;
3236
af033b2a
CY
3237 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3238 wbc->sync_mode == WB_SYNC_NONE &&
a1257023 3239 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
4d57b86d 3240 f2fs_available_free_memory(sbi, DIRTY_DENTS))
a1257023
JK
3241 goto skip_write;
3242
1018a546
CY
3243 /* skip writing during the file defragment preparing stage */
3244 if (is_inode_flag_set(inode, FI_SKIP_WRITES))
d323d005
CY
3245 goto skip_write;
3246
d31c7c3f
YH
3247 trace_f2fs_writepages(mapping->host, wbc, DATA);
3248
687de7f1
JK
3249 /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3250 if (wbc->sync_mode == WB_SYNC_ALL)
c29fd0c0 3251 atomic_inc(&sbi->wb_sync_req[DATA]);
34415099
CY
3252 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3253 /* to avoid potential deadlock */
3254 if (current->plug)
3255 blk_finish_plug(current->plug);
687de7f1 3256 goto skip_write;
34415099 3257 }
687de7f1 3258
853137ce
JK
3259 if (__should_serialize_io(inode, wbc)) {
3260 mutex_lock(&sbi->writepages);
3261 locked = true;
3262 }
3263
9dfa1baf 3264 blk_start_plug(&plug);
b0af6d49 3265 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
9dfa1baf 3266 blk_finish_plug(&plug);
687de7f1 3267
853137ce
JK
3268 if (locked)
3269 mutex_unlock(&sbi->writepages);
3270
687de7f1 3271 if (wbc->sync_mode == WB_SYNC_ALL)
c29fd0c0 3272 atomic_dec(&sbi->wb_sync_req[DATA]);
28ea6162
JK
3273 /*
3274 * if some pages were truncated, we cannot guarantee that their
3275 * mapping->host can be used to detect pending bios.
3276 */
458e6197 3277
4d57b86d 3278 f2fs_remove_dirty_inode(inode);
eb47b800 3279 return ret;
d3baf95d
JK
3280
3281skip_write:
a7ffdbe2 3282 wbc->pages_skipped += get_dirty_pages(inode);
d31c7c3f 3283 trace_f2fs_writepages(mapping->host, wbc, DATA);
d3baf95d 3284 return 0;
eb47b800
JK
3285}
3286
b0af6d49
CY
3287static int f2fs_write_data_pages(struct address_space *mapping,
3288 struct writeback_control *wbc)
3289{
3290 struct inode *inode = mapping->host;
3291
3292 return __f2fs_write_data_pages(mapping, wbc,
3293 F2FS_I(inode)->cp_task == current ?
3294 FS_CP_DATA_IO : FS_DATA_IO);
3295}
3296
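/*
 * A buffered write failed after blocks may already have been
 * reserved: drop the pagecache past i_size and truncate the blocks
 * beyond it. Quota files are skipped, as is the fs-verity build case,
 * where f2fs_end_enable_verity() performs the truncation itself.
 */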
a1e09b03 3297void f2fs_write_failed(struct inode *inode, loff_t to)
3aab8f82 3298{
819d9153 3299 loff_t i_size = i_size_read(inode);
3aab8f82 3300
3f188c23
JK
3301 if (IS_NOQUOTA(inode))
3302 return;
3303
95ae251f
EB
3304 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3305 if (to > i_size && !f2fs_verity_in_progress(inode)) {
e4544b63 3306 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6abaa83c 3307 filemap_invalidate_lock(inode->i_mapping);
a33c1502 3308
819d9153 3309 truncate_pagecache(inode, i_size);
3f188c23 3310 f2fs_truncate_blocks(inode, i_size, true);
a33c1502 3311
6abaa83c 3312 filemap_invalidate_unlock(inode->i_mapping);
e4544b63 3313 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3aab8f82
CY
3314 }
3315}
3316
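/*
 * Resolve the block address backing the page for ->write_begin. Fully
 * preallocated whole-page writes need no lookup at all; inline data is
 * either served from the inode block or converted out of it; otherwise
 * the address comes from the read extent cache or a dnode lookup,
 * taking f2fs_do_map_lock() whenever a new block may be reserved.
 */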
2aadac08
JK
3317static int prepare_write_begin(struct f2fs_sb_info *sbi,
3318 struct page *page, loff_t pos, unsigned len,
3319 block_t *blk_addr, bool *node_changed)
3320{
3321 struct inode *inode = page->mapping->host;
3322 pgoff_t index = page->index;
3323 struct dnode_of_data dn;
3324 struct page *ipage;
b4d07a3e 3325 bool locked = false;
94afd6d6 3326 struct extent_info ei = {0, };
2aadac08 3327 int err = 0;
2866fb16 3328 int flag;
2aadac08 3329
24b84912 3330 /*
3d697a4a
EB
3331 * If a whole page is being written and we already preallocated all the
3332 * blocks, then there is no need to get a block address now.
24b84912 3333 */
3d697a4a 3334 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
24b84912
JK
3335 return 0;
3336
2866fb16
SY
3337 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3338 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3339 flag = F2FS_GET_BLOCK_DEFAULT;
3340 else
3341 flag = F2FS_GET_BLOCK_PRE_AIO;
3342
b4d07a3e 3343 if (f2fs_has_inline_data(inode) ||
09cbfeaf 3344 (pos & PAGE_MASK) >= i_size_read(inode)) {
0ef81833 3345 f2fs_do_map_lock(sbi, flag, true);
b4d07a3e
JK
3346 locked = true;
3347 }
4c8ff709 3348
b4d07a3e 3349restart:
2aadac08 3350 /* check inline_data */
4d57b86d 3351 ipage = f2fs_get_node_page(sbi, inode->i_ino);
2aadac08
JK
3352 if (IS_ERR(ipage)) {
3353 err = PTR_ERR(ipage);
3354 goto unlock_out;
3355 }
3356
3357 set_new_dnode(&dn, inode, ipage, ipage, 0);
3358
3359 if (f2fs_has_inline_data(inode)) {
f2470371 3360 if (pos + len <= MAX_INLINE_DATA(inode)) {
4d57b86d 3361 f2fs_do_read_inline_data(page, ipage);
91942321 3362 set_inode_flag(inode, FI_DATA_EXIST);
ab47036d 3363 if (inode->i_nlink)
b763f3be 3364 set_page_private_inline(ipage);
2aadac08
JK
3365 } else {
3366 err = f2fs_convert_inline_page(&dn, page);
3367 if (err)
b4d07a3e
JK
3368 goto out;
3369 if (dn.data_blkaddr == NULL_ADDR)
3370 err = f2fs_get_block(&dn, index);
3371 }
3372 } else if (locked) {
3373 err = f2fs_get_block(&dn, index);
3374 } else {
e7547dac 3375 if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
b4d07a3e
JK
3376 dn.data_blkaddr = ei.blk + index - ei.fofs;
3377 } else {
b4d07a3e 3378 /* hole case */
4d57b86d 3379 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
4da7bf5a 3380 if (err || dn.data_blkaddr == NULL_ADDR) {
b4d07a3e 3381 f2fs_put_dnode(&dn);
0ef81833 3382 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
59c9081b 3383 true);
2866fb16 3384 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
b4d07a3e
JK
3385 locked = true;
3386 goto restart;
3387 }
2aadac08
JK
3388 }
3389 }
b4d07a3e 3390
2aadac08
JK
3391 /* convert_inline_page can make node_changed */
3392 *blk_addr = dn.data_blkaddr;
3393 *node_changed = dn.node_changed;
b4d07a3e 3394out:
2aadac08
JK
3395 f2fs_put_dnode(&dn);
3396unlock_out:
b4d07a3e 3397 if (locked)
0ef81833 3398 f2fs_do_map_lock(sbi, flag, false);
2aadac08
JK
3399 return err;
3400}
3401
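/*
 * Look up, without reserving anything, the block address at @index.
 * A missing dnode or unallocated slot is reported as NULL_ADDR rather
 * than as an error, so callers can cheaply distinguish holes.
 */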
3db1de0e
DJ
3402static int __find_data_block(struct inode *inode, pgoff_t index,
3403 block_t *blk_addr)
3404{
3405 struct dnode_of_data dn;
3406 struct page *ipage;
3407 struct extent_info ei = {0, };
3408 int err = 0;
3409
3410 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
3411 if (IS_ERR(ipage))
3412 return PTR_ERR(ipage);
3413
3414 set_new_dnode(&dn, inode, ipage, ipage, 0);
3415
e7547dac 3416 if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
3db1de0e
DJ
3417 dn.data_blkaddr = ei.blk + index - ei.fofs;
3418 } else {
3419 /* hole case */
3420 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3421 if (err) {
3422 dn.data_blkaddr = NULL_ADDR;
3423 err = 0;
3424 }
3425 }
3426 *blk_addr = dn.data_blkaddr;
3427 f2fs_put_dnode(&dn);
3428 return err;
3429}
3430
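/*
 * Counterpart of __find_data_block(): allocate (or look up) the block
 * at @index under f2fs_do_map_lock(), and report whether the node page
 * was changed so the caller can rebalance afterwards.
 */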
3431static int __reserve_data_block(struct inode *inode, pgoff_t index,
3432 block_t *blk_addr, bool *node_changed)
3433{
3434 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3435 struct dnode_of_data dn;
3436 struct page *ipage;
3437 int err = 0;
3438
3439 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
3440
3441 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3442 if (IS_ERR(ipage)) {
3443 err = PTR_ERR(ipage);
3444 goto unlock_out;
3445 }
3446 set_new_dnode(&dn, inode, ipage, ipage, 0);
3447
3448 err = f2fs_get_block(&dn, index);
3449
3450 *blk_addr = dn.data_blkaddr;
3451 *node_changed = dn.node_changed;
3452 f2fs_put_dnode(&dn);
3453
3454unlock_out:
3455 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
3456 return err;
3457}
3458
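/*
 * ->write_begin path for atomic-write files. Lookup order: the COW
 * inode first (an earlier atomic write may already own the block),
 * then the original inode, unless FI_ATOMIC_REPLACE makes the old
 * data irrelevant. If the COW inode has no block yet, reserve one
 * there, but hand back the original block address so the existing
 * data is read in before the partial update.
 */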
3459static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
3460 struct page *page, loff_t pos, unsigned int len,
3461 block_t *blk_addr, bool *node_changed)
3462{
3463 struct inode *inode = page->mapping->host;
3464 struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3465 pgoff_t index = page->index;
3466 int err = 0;
f8e2f32b 3467 block_t ori_blk_addr = NULL_ADDR;
3db1de0e
DJ
3468
3469 /* If pos is beyond the end of file, reserve a new block in COW inode */
3470 if ((pos & PAGE_MASK) >= i_size_read(inode))
f8e2f32b 3471 goto reserve_block;
3db1de0e
DJ
3472
3473 /* Look for the block in COW inode first */
3474 err = __find_data_block(cow_inode, index, blk_addr);
3475 if (err)
3476 return err;
3477 else if (*blk_addr != NULL_ADDR)
3478 return 0;
3479
41e8f85a
DJ
3480 if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
3481 goto reserve_block;
3482
3db1de0e
DJ
3483 /* Look for the block in the original inode */
3484 err = __find_data_block(inode, index, &ori_blk_addr);
3485 if (err)
3486 return err;
3487
f8e2f32b 3488reserve_block:
3db1de0e
DJ
3489 /* Finally, we should reserve a new block in COW inode for the update */
3490 err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
3491 if (err)
3492 return err;
f8e2f32b 3493 inc_atomic_write_cnt(inode);
3db1de0e
DJ
3494
3495 if (ori_blk_addr != NULL_ADDR)
3496 *blk_addr = ori_blk_addr;
3497 return 0;
3498}
3499
eb47b800 3500static int f2fs_write_begin(struct file *file, struct address_space *mapping,
9d6b0cd7 3501 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
eb47b800
JK
3502{
3503 struct inode *inode = mapping->host;
4081363f 3504 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
86531d6b 3505 struct page *page = NULL;
09cbfeaf 3506 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3db1de0e 3507 bool need_balance = false;
2aadac08 3508 block_t blkaddr = NULL_ADDR;
eb47b800
JK
3509 int err = 0;
3510
9d6b0cd7 3511 trace_f2fs_write_begin(inode, pos, len);
62aed044 3512
00e09c0b
CY
3513 if (!f2fs_is_checkpoint_ready(sbi)) {
3514 err = -ENOSPC;
4354994f 3515 goto fail;
00e09c0b 3516 }
4354994f 3517
3518 /*
3519 * We must check this here to avoid a deadlock between the inode page
3520 * and page #0. The locking order for inline_data conversion is:
3521 * lock_page(page #0) -> lock_page(inode_page)
3522 */
3523 if (index != 0) {
3524 err = f2fs_convert_inline_inode(inode);
3525 if (err)
3526 goto fail;
3527 }
3528
3529#ifdef CONFIG_F2FS_FS_COMPRESSION
3530 if (f2fs_compressed_file(inode)) {
3531 int ret;
3532
3533 *fsdata = NULL;
3534
9b56adcf 3535 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3536 goto repeat;
3537
3538 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3539 index, fsdata);
3540 if (ret < 0) {
3541 err = ret;
3542 goto fail;
3543 } else if (ret) {
3544 return 0;
3545 }
3546 }
3547#endif
3548
afcb7ca0 3549repeat:
3550 /*
3551 * Do not use grab_cache_page_write_begin() here: it can deadlock in
3552 * wait_for_stable_page(). We wait for writeback below, under our own I/O control.
3553 */
01eccef7 3554 page = f2fs_pagecache_get_page(mapping, index,
86d54795 3555 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3556 if (!page) {
3557 err = -ENOMEM;
3558 goto fail;
3559 }
d5f66990 3560
3561 /* TODO: cluster can be compressed due to race with .writepage */
3562
3563 *pagep = page;
3564
3565 if (f2fs_is_atomic_file(inode))
3566 err = prepare_atomic_write_begin(sbi, page, pos, len,
3567 &blkaddr, &need_balance);
3568 else
3569 err = prepare_write_begin(sbi, page, pos, len,
2aadac08 3570 &blkaddr, &need_balance);
9ba69cf9 3571 if (err)
2aadac08 3572 goto fail;
9ba69cf9 3573
3574 if (need_balance && !IS_NOQUOTA(inode) &&
3575 has_not_enough_free_secs(sbi, 0, 0)) {
2a340760 3576 unlock_page(page);
2c4db1a6 3577 f2fs_balance_fs(sbi, true);
3578 lock_page(page);
3579 if (page->mapping != mapping) {
3580 /* The page got truncated from under us */
3581 f2fs_put_page(page, 1);
3582 goto repeat;
3583 }
3584 }
3585
bae0ee7a 3586 f2fs_wait_on_page_writeback(page, DATA, false, true);
b3d208f9 3587
3588 if (len == PAGE_SIZE || PageUptodate(page))
3589 return 0;
eb47b800 3590
3591 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3592 !f2fs_verity_in_progress(inode)) {
3593 zero_user_segment(page, len, PAGE_SIZE);
3594 return 0;
3595 }
3596
2aadac08 3597 if (blkaddr == NEW_ADDR) {
09cbfeaf 3598 zero_user_segment(page, 0, PAGE_SIZE);
649d7df2 3599 SetPageUptodate(page);
eb47b800 3600 } else {
3601 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3602 DATA_GENERIC_ENHANCE_READ)) {
10f966bb 3603 err = -EFSCORRUPTED;
95fa90c9 3604 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3605 goto fail;
3606 }
b7973091 3607 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
13ba41e3 3608 if (err)
78682f79 3609 goto fail;
d54c795b 3610
393ff91f 3611 lock_page(page);
6bacf52f 3612 if (unlikely(page->mapping != mapping)) {
3613 f2fs_put_page(page, 1);
3614 goto repeat;
eb47b800 3615 }
3616 if (unlikely(!PageUptodate(page))) {
3617 err = -EIO;
3618 goto fail;
4375a336 3619 }
eb47b800 3620 }
eb47b800 3621 return 0;
9ba69cf9 3622
3aab8f82 3623fail:
86531d6b 3624 f2fs_put_page(page, 1);
3e679dc7 3625 f2fs_write_failed(inode, pos + len);
3aab8f82 3626 return err;
3627}
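/*
 * For reference, one iteration of generic_perform_write() drives the
 * ->write_begin()/->write_end() pair above roughly as follows (a
 * simplified sketch of the VFS side, not f2fs code):
 *
 *	status = a_ops->write_begin(file, mapping, pos, bytes,
 *				    &page, &fsdata);
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *
 * A short copy into a non-uptodate page is reported back as copied == 0
 * by f2fs_write_end() below, which makes the caller retry the copy.
 */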
3628
3629static int f2fs_write_end(struct file *file,
3630 struct address_space *mapping,
3631 loff_t pos, unsigned len, unsigned copied,
3632 struct page *page, void *fsdata)
3633{
3634 struct inode *inode = page->mapping->host;
3635
3636 trace_f2fs_write_end(inode, pos, len, copied);
3637
3638 /*
3639 * This should come from len == PAGE_SIZE, and we expect copied to be
3640 * PAGE_SIZE. Otherwise, we treat it as zero copied and let
3641 * generic_perform_write() try to copy the data again with copied == 0.
3642 */
3643 if (!PageUptodate(page)) {
746e2403 3644 if (unlikely(copied != len))
3645 copied = 0;
3646 else
3647 SetPageUptodate(page);
3648 }
3649
3650#ifdef CONFIG_F2FS_FS_COMPRESSION
3651 /* overwrite compressed file */
3652 if (f2fs_compressed_file(inode) && fsdata) {
3653 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3654 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3655
3656 if (pos + copied > i_size_read(inode) &&
3657 !f2fs_verity_in_progress(inode))
3658 f2fs_i_size_write(inode, pos + copied);
3659 return copied;
3660 }
3661#endif
3662
3663 if (!copied)
3664 goto unlock_out;
3665
34ba94ba 3666 set_page_dirty(page);
a1dd3c13 3667
95ae251f 3668 if (pos + copied > i_size_read(inode) &&
3db1de0e 3669 !f2fs_verity_in_progress(inode)) {
fc9581c8 3670 f2fs_i_size_write(inode, pos + copied);
3671 if (f2fs_is_atomic_file(inode))
3672 f2fs_i_size_write(F2FS_I(inode)->cow_inode,
3673 pos + copied);
3674 }
649d7df2 3675unlock_out:
3024c9a1 3676 f2fs_put_page(page, 1);
d0239e1b 3677 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3678 return copied;
3679}
3680
91503996 3681void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
eb47b800 3682{
91503996 3683 struct inode *inode = folio->mapping->host;
487261f3 3684 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
a7ffdbe2 3685
487261f3 3686 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
91503996 3687 (offset || length != folio_size(folio)))
3688 return;
3689
91503996 3690 if (folio_test_dirty(folio)) {
933439c8 3691 if (inode->i_ino == F2FS_META_INO(sbi)) {
487261f3 3692 dec_page_count(sbi, F2FS_DIRTY_META);
933439c8 3693 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
487261f3 3694 dec_page_count(sbi, F2FS_DIRTY_NODES);
933439c8 3695 } else {
487261f3 3696 inode_dec_dirty_pages(inode);
4d57b86d 3697 f2fs_remove_dirty_inode(inode);
933439c8 3698 }
487261f3 3699 }
decd36b6 3700
91503996 3701 clear_page_private_gcing(&folio->page);
2baf0781 3702
3703 if (test_opt(sbi, COMPRESS_CACHE) &&
3704 inode->i_ino == F2FS_COMPRESS_INO(sbi))
91503996 3705 clear_page_private_data(&folio->page);
6ce19aff 3706
91503996 3707 folio_detach_private(folio);
3708}
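/*
 * Note the early return above: for regular inodes (ino >= F2FS_ROOT_INO)
 * a partial invalidation (offset != 0 or length != folio_size()) leaves
 * the dirty accounting and private data untouched; only a whole-folio
 * invalidation goes through the full teardown.
 */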
3709
c26cd045 3710bool f2fs_release_folio(struct folio *folio, gfp_t wait)
eb47b800 3711{
3712 struct f2fs_sb_info *sbi;
3713
3714 /* If this is dirty folio, keep private data */
3715 if (folio_test_dirty(folio))
3716 return false;
f68daeeb 3717
3718 sbi = F2FS_M_SB(folio->mapping);
3719 if (test_opt(sbi, COMPRESS_CACHE)) {
3720 struct inode *inode = folio->mapping->host;
6ce19aff 3721
3722 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3723 clear_page_private_data(&folio->page);
3724 }
3725
c26cd045 3726 clear_page_private_gcing(&folio->page);
b763f3be 3727
3728 folio_detach_private(folio);
3729 return true;
3730}
3731
3732static bool f2fs_dirty_data_folio(struct address_space *mapping,
3733 struct folio *folio)
eb47b800 3734{
4f5e34f7 3735 struct inode *inode = mapping->host;
eb47b800 3736
4f5e34f7 3737 trace_f2fs_set_page_dirty(&folio->page, DATA);
26c6b887 3738
3739 if (!folio_test_uptodate(folio))
3740 folio_mark_uptodate(folio);
3741 BUG_ON(folio_test_swapcache(folio));
34ba94ba 3742
9b7eadd9 3743 if (filemap_dirty_folio(mapping, folio)) {
3744 f2fs_update_dirty_folio(inode, folio);
3745 return true;
eb47b800 3746 }
0fb5b2eb 3747 return false;
3748}
3749
3750
3751static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3752{
3753#ifdef CONFIG_F2FS_FS_COMPRESSION
3754 struct dnode_of_data dn;
3755 sector_t start_idx, blknr = 0;
3756 int ret;
3757
3758 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3759
3760 set_new_dnode(&dn, inode, NULL, NULL, 0);
3761 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3762 if (ret)
3763 return 0;
3764
3765 if (dn.data_blkaddr != COMPRESS_ADDR) {
3766 dn.ofs_in_node += block - start_idx;
3767 blknr = f2fs_data_blkaddr(&dn);
3768 if (!__is_valid_data_blkaddr(blknr))
3769 blknr = 0;
3770 }
3771
3772 f2fs_put_dnode(&dn);
3773 return blknr;
3774#else
250e84d7 3775 return 0;
3776#endif
3777}
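/*
 * Illustrative numbers: with i_cluster_size == 4, a query for block 10
 * rounds down to start_idx 8. If the first slot of that cluster holds
 * COMPRESS_ADDR, the cluster is compressed and 0 is returned (a
 * compressed cluster has no single physical address to report);
 * otherwise ofs_in_node is advanced by 10 - 8 = 2 to read the raw
 * block address.
 */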
3778
3779
3780static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3781{
454ae7e5 3782 struct inode *inode = mapping->host;
b79b0a31 3783 sector_t blknr = 0;
454ae7e5 3784
1d373a0e 3785 if (f2fs_has_inline_data(inode))
b79b0a31 3786 goto out;
3787
3788 /* make sure allocating whole blocks */
3789 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3790 filemap_write_and_wait(mapping);
3791
4eda1682 3792 /* The block number must be below the F2FS maximum file block count */
6d1451bf 3793 if (unlikely(block >= max_file_blocks(inode)))
4eda1682 3794 goto out;
c1c63387 3795
3796 if (f2fs_compressed_file(inode)) {
3797 blknr = f2fs_bmap_compress(inode, block);
3798 } else {
3799 struct f2fs_map_blocks map;
3800
3801 memset(&map, 0, sizeof(map));
3802 map.m_lblk = block;
3803 map.m_len = 1;
3804 map.m_next_pgofs = NULL;
3805 map.m_seg_type = NO_CHECK_TYPE;
3806
3807 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3808 blknr = map.m_pblk;
4eda1682 3809 }
3810out:
3811 trace_f2fs_bmap(inode, block, blknr);
3812 return blknr;
3813}
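/*
 * This is what the FIBMAP ioctl ends up calling through bmap(). A
 * minimal userspace sketch (assuming an open file descriptor fd and
 * CAP_SYS_RAWIO):
 *
 *	int blk = 10;			- logical block within the file
 *	ioctl(fd, FIBMAP, &blk);	- blk now holds the physical block,
 *					  or 0 for holes, inline data and
 *					  compressed clusters
 */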
3814
4969c06a 3815#ifdef CONFIG_SWAP
3816static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3817 unsigned int blkcnt)
3818{
3819 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3820 unsigned int blkofs;
3821 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3822 unsigned int secidx = start_blk / blk_per_sec;
3823 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3824 int ret = 0;
3825
e4544b63 3826 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
edc6d01b 3827 filemap_invalidate_lock(inode->i_mapping);
3828
3829 set_inode_flag(inode, FI_ALIGNED_WRITE);
1018a546 3830 set_inode_flag(inode, FI_OPU_WRITE);
3831
3832 for (; secidx < end_sec; secidx++) {
e4544b63 3833 f2fs_down_write(&sbi->pin_sem);
3834
3835 f2fs_lock_op(sbi);
3836 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3837 f2fs_unlock_op(sbi);
3838
1018a546 3839 set_inode_flag(inode, FI_SKIP_WRITES);
3840
3841 for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
3842 struct page *page;
3843 unsigned int blkidx = secidx * blk_per_sec + blkofs;
3844
3845 page = f2fs_get_lock_data_page(inode, blkidx, true);
3846 if (IS_ERR(page)) {
e4544b63 3847 f2fs_up_write(&sbi->pin_sem);
3848 ret = PTR_ERR(page);
3849 goto done;
3850 }
3851
3852 set_page_dirty(page);
3853 f2fs_put_page(page, 1);
3854 }
3855
1018a546 3856 clear_inode_flag(inode, FI_SKIP_WRITES);
3857
3858 ret = filemap_fdatawrite(inode->i_mapping);
3859
e4544b63 3860 f2fs_up_write(&sbi->pin_sem);
3861
3862 if (ret)
3863 break;
3864 }
3865
3866done:
3867 clear_inode_flag(inode, FI_SKIP_WRITES);
3868 clear_inode_flag(inode, FI_OPU_WRITE);
3869 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3870
edc6d01b 3871 filemap_invalidate_unlock(inode->i_mapping);
e4544b63 3872 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3873
3874 return ret;
3875}
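/*
 * In short: every block in the range is re-dirtied and written back while
 * FI_OPU_WRITE and a freshly allocated CURSEG_COLD_DATA_PINNED section
 * force out-of-place, section-aligned allocation. check_swap_activate()
 * below relies on this to fix up misaligned swapfile extents before
 * retrying the mapping.
 */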
3876
0b8fc006 3877static int check_swap_activate(struct swap_info_struct *sis,
3878 struct file *swap_file, sector_t *span)
3879{
3880 struct address_space *mapping = swap_file->f_mapping;
3881 struct inode *inode = mapping->host;
36e4d958 3882 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3883 sector_t cur_lblock;
3884 sector_t last_lblock;
3885 sector_t pblock;
3886 sector_t lowest_pblock = -1;
3887 sector_t highest_pblock = 0;
3888 int nr_extents = 0;
3889 unsigned long nr_pblocks;
3890 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3891 unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
ca298241 3892 unsigned int not_aligned = 0;
36e4d958 3893 int ret = 0;
3894
3895 /*
3896 * Map all the blocks into the extent list. This code doesn't try
3897 * to be very smart.
3898 */
3899 cur_lblock = 0;
6cbfcab5 3900 last_lblock = bytes_to_blks(inode, i_size_read(inode));
af4b6b8e 3901
1da66103 3902 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
b876f4c9 3903 struct f2fs_map_blocks map;
859fca6b 3904retry:
3905 cond_resched();
3906
3907 memset(&map, 0, sizeof(map));
3908 map.m_lblk = cur_lblock;
36e4d958 3909 map.m_len = last_lblock - cur_lblock;
3910 map.m_next_pgofs = NULL;
3911 map.m_next_extent = NULL;
b876f4c9 3912 map.m_seg_type = NO_CHECK_TYPE;
36e4d958 3913 map.m_may_create = false;
af4b6b8e 3914
b876f4c9 3915 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
af4b6b8e 3916 if (ret)
36e4d958 3917 goto out;
3918
3919 /* hole */
36e4d958 3920 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
833dcd35 3921 f2fs_err(sbi, "Swapfile has holes");
f395183f 3922 ret = -EINVAL;
36e4d958 3923 goto out;
3924 }
af4b6b8e 3925
3926 pblock = map.m_pblk;
3927 nr_pblocks = map.m_len;
af4b6b8e 3928
3929 if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
3930 nr_pblocks & sec_blks_mask) {
ca298241 3931 not_aligned++;
36e4d958 3932
3933 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
3934 if (cur_lblock + nr_pblocks > sis->max)
3935 nr_pblocks -= blks_per_sec;
3936
3937 if (!nr_pblocks) {
3938 /* this extent is last one */
3939 nr_pblocks = map.m_len;
3940 f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
3941 goto next;
3942 }
3943
3944 ret = f2fs_migrate_blocks(inode, cur_lblock,
3945 nr_pblocks);
3946 if (ret)
3947 goto out;
3948 goto retry;
3949 }
3950next:
3951 if (cur_lblock + nr_pblocks >= sis->max)
3952 nr_pblocks = sis->max - cur_lblock;
3953
3954 if (cur_lblock) { /* exclude the header page */
3955 if (pblock < lowest_pblock)
3956 lowest_pblock = pblock;
3957 if (pblock + nr_pblocks - 1 > highest_pblock)
3958 highest_pblock = pblock + nr_pblocks - 1;
3959 }
3960
3961 /*
3962 * We found a contiguous run of blocks; record it as a swap extent
3963 */
3964 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
3965 if (ret < 0)
3966 goto out;
3967 nr_extents += ret;
3968 cur_lblock += nr_pblocks;
3969 }
3970 ret = nr_extents;
3971 *span = 1 + highest_pblock - lowest_pblock;
3972 if (cur_lblock == 0)
3973 cur_lblock = 1; /* force Empty message */
3974 sis->max = cur_lblock;
3975 sis->pages = cur_lblock - 1;
3976 sis->highest_bit = cur_lblock - 1;
3977out:
3978 if (not_aligned)
3979 f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
3980 not_aligned, blks_per_sec * F2FS_BLKSIZE);
af4b6b8e 3981 return ret;
3982}
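/*
 * Example of the alignment check above with 4KB blocks and 2MB sections
 * (blks_per_sec == 512, sec_blks_mask == 511): an extent starting 2048
 * blocks past main_blkaddr and spanning 1024 blocks passes both mask
 * tests, while one starting at offset 2050 or spanning 1000 blocks is
 * first migrated via f2fs_migrate_blocks() and then remapped.
 */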
3983
3984static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3985 sector_t *span)
3986{
3987 struct inode *inode = file_inode(file);
3988 int ret;
3989
3990 if (!S_ISREG(inode->i_mode))
3991 return -EINVAL;
3992
3993 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3994 return -EROFS;
3995
3996 if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
3997 f2fs_err(F2FS_I_SB(inode),
3998 "Swapfile not supported in LFS mode");
3999 return -EINVAL;
4000 }
4001
4002 ret = f2fs_convert_inline_inode(inode);
4003 if (ret)
4004 return ret;
4005
78134d03 4006 if (!f2fs_disable_compressed_file(inode))
4007 return -EINVAL;
4008
4009 f2fs_precache_extents(inode);
4010
4011 ret = check_swap_activate(sis, file, span);
4012 if (ret < 0)
4013 return ret;
4014
8ec071c3 4015 stat_inc_swapfile_inode(inode);
4969c06a 4016 set_inode_flag(inode, FI_PIN_FILE);
4969c06a 4017 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3e5e479a 4018 return ret;
4019}
4020
4021static void f2fs_swap_deactivate(struct file *file)
4022{
4023 struct inode *inode = file_inode(file);
4024
8ec071c3 4025 stat_dec_swapfile_inode(inode);
4026 clear_inode_flag(inode, FI_PIN_FILE);
4027}
4028#else
4029static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4030 sector_t *span)
4031{
4032 return -EOPNOTSUPP;
4033}
4034
4035static void f2fs_swap_deactivate(struct file *file)
4036{
4037}
4038#endif
4039
eb47b800 4040const struct address_space_operations f2fs_dblock_aops = {
be05584f 4041 .read_folio = f2fs_read_data_folio,
23323196 4042 .readahead = f2fs_readahead,
4043 .writepage = f2fs_write_data_page,
4044 .writepages = f2fs_write_data_pages,
4045 .write_begin = f2fs_write_begin,
a1dd3c13 4046 .write_end = f2fs_write_end,
4f5e34f7 4047 .dirty_folio = f2fs_dirty_data_folio,
1d5b9bd6 4048 .migrate_folio = filemap_migrate_folio,
91503996 4049 .invalidate_folio = f2fs_invalidate_folio,
c26cd045 4050 .release_folio = f2fs_release_folio,
a1e09b03 4051 .direct_IO = noop_direct_IO,
c01e54b7 4052 .bmap = f2fs_bmap,
4053 .swap_activate = f2fs_swap_activate,
4054 .swap_deactivate = f2fs_swap_deactivate,
eb47b800 4055};
6dbb1796 4056
5ec2d99d 4057void f2fs_clear_page_cache_dirty_tag(struct page *page)
4058{
4059 struct address_space *mapping = page_mapping(page);
4060 unsigned long flags;
4061
4062 xa_lock_irqsave(&mapping->i_pages, flags);
5ec2d99d 4063 __xa_clear_mark(&mapping->i_pages, page_index(page),
4064 PAGECACHE_TAG_DIRTY);
4065 xa_unlock_irqrestore(&mapping->i_pages, flags);
4066}
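/*
 * Only the xarray tag is cleared here, not the page's own dirty flag, so
 * the page simply stops being visible to tag-based writeback
 * (write_cache_pages() and friends) without otherwise changing its state.
 */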
4067
4068int __init f2fs_init_post_read_processing(void)
4069{
4070 bio_post_read_ctx_cache =
4071 kmem_cache_create("f2fs_bio_post_read_ctx",
4072 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4073 if (!bio_post_read_ctx_cache)
4074 goto fail;
4075 bio_post_read_ctx_pool =
4076 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4077 bio_post_read_ctx_cache);
4078 if (!bio_post_read_ctx_pool)
4079 goto fail_free_cache;
4080 return 0;
4081
4082fail_free_cache:
4083 kmem_cache_destroy(bio_post_read_ctx_cache);
4084fail:
4085 return -ENOMEM;
4086}
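/*
 * The mempool pre-allocates NUM_PREALLOC_POST_READ_CTXS contexts, so read
 * completions can always obtain a bio_post_read_ctx and make forward
 * progress even when regular slab allocation fails under memory pressure.
 */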
4087
0b20fcec 4088void f2fs_destroy_post_read_processing(void)
4089{
4090 mempool_destroy(bio_post_read_ctx_pool);
4091 kmem_cache_destroy(bio_post_read_ctx_cache);
4092}
0b20fcec 4093
4094int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4095{
4096 if (!f2fs_sb_has_encrypt(sbi) &&
4097 !f2fs_sb_has_verity(sbi) &&
4098 !f2fs_sb_has_compression(sbi))
4099 return 0;
4100
4101 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4102 WQ_UNBOUND | WQ_HIGHPRI,
4103 num_online_cpus());
870af777 4104 return sbi->post_read_wq ? 0 : -ENOMEM;
4105}
4106
4107void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4108{
4109 if (sbi->post_read_wq)
4110 destroy_workqueue(sbi->post_read_wq);
4111}
4112
4113int __init f2fs_init_bio_entry_cache(void)
4114{
98510003 4115 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
0b20fcec 4116 sizeof(struct bio_entry));
870af777 4117 return bio_entry_slab ? 0 : -ENOMEM;
4118}
4119
f543805f 4120void f2fs_destroy_bio_entry_cache(void)
4121{
4122 kmem_cache_destroy(bio_entry_slab);
4123}
4124
4125static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4126 unsigned int flags, struct iomap *iomap,
4127 struct iomap *srcmap)
4128{
4129 struct f2fs_map_blocks map = {};
4130 pgoff_t next_pgofs = 0;
4131 int err;
4132
4133 map.m_lblk = bytes_to_blks(inode, offset);
4134 map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
4135 map.m_next_pgofs = &next_pgofs;
4136 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4137 if (flags & IOMAP_WRITE)
4138 map.m_may_create = true;
4139
4140 err = f2fs_map_blocks(inode, &map, flags & IOMAP_WRITE,
4141 F2FS_GET_BLOCK_DIO);
4142 if (err)
4143 return err;
4144
4145 iomap->offset = blks_to_bytes(inode, map.m_lblk);
4146
4147 /*
4148 * When inline encryption is enabled, sometimes I/O to an encrypted file
4149 * has to be broken up to guarantee DUN contiguity. Handle this by
4150 * limiting the length of the mapping returned.
4151 */
4152 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4153
4154 if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
4155 iomap->length = blks_to_bytes(inode, map.m_len);
4156 if (map.m_flags & F2FS_MAP_MAPPED) {
4157 iomap->type = IOMAP_MAPPED;
4158 iomap->flags |= IOMAP_F_MERGED;
4159 } else {
4160 iomap->type = IOMAP_UNWRITTEN;
4161 }
4162 if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
4163 return -EINVAL;
4164
4165 iomap->bdev = map.m_bdev;
4166 iomap->addr = blks_to_bytes(inode, map.m_pblk);
4167 } else {
4168 iomap->length = blks_to_bytes(inode, next_pgofs) -
4169 iomap->offset;
4170 iomap->type = IOMAP_HOLE;
4171 iomap->addr = IOMAP_NULL_ADDR;
4172 }
4173
4174 if (map.m_flags & F2FS_MAP_NEW)
4175 iomap->flags |= IOMAP_F_NEW;
4176 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4177 offset + length > i_size_read(inode))
4178 iomap->flags |= IOMAP_F_DIRTY;
4179
4180 return 0;
4181}
4182
4183const struct iomap_ops f2fs_iomap_ops = {
4184 .iomap_begin = f2fs_iomap_begin,
4185};
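/*
 * Only ->iomap_begin() is implemented; there is no per-extent state to
 * undo, so no ->iomap_end() is needed. The direct I/O paths (in
 * fs/f2fs/file.c) drive these ops through iomap_dio_rw(), roughly (a
 * sketch of the read side):
 *
 *	ret = iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
 *			   &f2fs_iomap_dio_read_ops, 0, NULL, 0);
 */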