/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

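/*
 * Completion handler for read bios.  On success each page is marked
 * up-to-date before being unlocked; on error it is flagged with PG_error
 * instead.  For encrypted bios the pages must not be exposed until they
 * are decrypted, so a successful completion is handed off to the crypto
 * workqueue, which unlocks the pages once decryption finishes.
 */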
static void f2fs_read_end_io(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        int i;

        if (f2fs_bio_encrypted(bio)) {
                if (err) {
                        f2fs_release_crypto_ctx(bio->bi_private);
                } else {
                        f2fs_end_io_crypto_work(bio->bi_private, bio);
                        return;
                }
        }

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (!err) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        }
        bio_put(bio);
}

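/*
 * Completion handler for write bios.  A failed write re-dirties the page,
 * records AS_EIO on the mapping, and stops checkpointing, since a lost
 * data write can no longer be recovered consistently.  When the last
 * in-flight writeback page completes, any checkpoint sleeping on
 * sbi->cp_wait is woken up.
 */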
static void f2fs_write_end_io(struct bio *bio, int err)
{
        struct f2fs_sb_info *sbi = bio->bi_private;
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                f2fs_restore_and_release_control_page(&page);

                if (unlikely(err)) {
                        set_page_dirty(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        f2fs_stop_checkpoint(sbi);
                }
                end_page_writeback(page);
                dec_page_count(sbi, F2FS_WRITEBACK);
        }

        if (!get_pages(sbi, F2FS_WRITEBACK) &&
                        !list_empty(&sbi->cp_wait.task_list))
                wake_up(&sbi->cp_wait);

        bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
                                int npages, bool is_read)
{
        struct bio *bio;

        /* No failure on bio allocation */
        bio = bio_alloc(GFP_NOIO, npages);

        bio->bi_bdev = sbi->sb->s_bdev;
        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
        bio->bi_private = is_read ? NULL : sbi;

        return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
        struct f2fs_io_info *fio = &io->fio;

        if (!io->bio)
                return;

        if (is_read_io(fio->rw))
                trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
        else
                trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

        submit_bio(fio->rw, io->bio);
        io->bio = NULL;
}

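/*
 * Flush the bio currently being merged for the given page type.  This is
 * the ordering point between merged, cached writes and anything that must
 * see them issued to disk.  A minimal usage sketch (hypothetical caller,
 * not taken from this file):
 *
 *      f2fs_submit_page_mbio(&fio);
 *      f2fs_submit_merged_bio(fio.sbi, DATA, WRITE);
 *
 * For META_FLUSH and above, the request is upgraded to a flush/FUA write
 * for the checkpoint path unless the NOBARRIER mount option is set.
 */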
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
                                enum page_type type, int rw)
{
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io;

        io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

        down_write(&io->io_rwsem);

        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                if (test_opt(sbi, NOBARRIER))
                        io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
                else
                        io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
        }
        __submit_merged_bio(io);
        up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
        struct bio *bio;
        struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

        trace_f2fs_submit_page_bio(page, fio);
        f2fs_trace_ios(fio, 0);

        /* Allocate a new bio */
        bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

        if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                bio_put(bio);
                f2fs_put_page(page, 1);
                return -EFAULT;
        }

        submit_bio(fio->rw, bio);
        return 0;
}

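/*
 * Queue one page into the per-type merged bio.  Pages with consecutive
 * block addresses and the same rw mode are batched into a single bio;
 * any discontinuity forces the pending bio out first.  Note that the page
 * is not necessarily submitted when this returns; callers that need the
 * IO issued must follow up with f2fs_submit_merged_bio() as sketched
 * above.
 */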
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
        struct f2fs_sb_info *sbi = fio->sbi;
        enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
        struct f2fs_bio_info *io;
        bool is_read = is_read_io(fio->rw);
        struct page *bio_page;

        io = is_read ? &sbi->read_io : &sbi->write_io[btype];

        verify_block_addr(sbi, fio->blk_addr);

        down_write(&io->io_rwsem);

        if (!is_read)
                inc_page_count(sbi, F2FS_WRITEBACK);

        if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
                                                io->fio.rw != fio->rw))
                __submit_merged_bio(io);
alloc_new:
        if (io->bio == NULL) {
                int bio_blocks = MAX_BIO_BLOCKS(sbi);

                io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
                io->fio = *fio;
        }

        bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

        if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
                                                        PAGE_CACHE_SIZE) {
                __submit_merged_bio(io);
                goto alloc_new;
        }

        io->last_block_in_bio = fio->blk_addr;
        f2fs_trace_ios(fio, 0);

        up_write(&io->io_rwsem);
        trace_f2fs_submit_page_mbio(fio->page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
        struct f2fs_node *rn;
        __le32 *addr_array;
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;

        f2fs_wait_on_page_writeback(node_page, NODE);

        rn = F2FS_NODE(node_page);

        /* Get physical address of data block */
        addr_array = blkaddr_in_node(rn);
        addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
        set_page_dirty(node_page);
}

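/*
 * Reserve one new block for the dnode: the block address is set to
 * NEW_ADDR (allocated but not yet written) and one block is charged
 * against the free space.  Returns -ENOSPC when no space is left, or
 * -EPERM if allocation is disabled for this inode.
 */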
int reserve_new_block(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
                return -EPERM;
        if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
                return -ENOSPC;

        trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

        dn->data_blkaddr = NEW_ADDR;
        set_data_blkaddr(dn);
        mark_inode_dirty(dn->inode);
        sync_inode_page(dn);
        return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
        bool need_put = dn->inode_page ? false : true;
        int err;

        err = get_dnode_of_data(dn, index, ALLOC_NODE);
        if (err)
                return err;

        if (dn->data_blkaddr == NULL_ADDR)
                err = reserve_new_block(dn);
        if (err || need_put)
                f2fs_put_dnode(dn);
        return err;
}

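/*
 * Find the block address for @index, consulting the extent cache first,
 * and kick off a read into the page cache if needed.  The returned page
 * may still be locked while the read is in flight; callers such as
 * find_data_page() and get_lock_data_page() below wait for completion
 * themselves.
 */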
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
{
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
        struct extent_info ei;
        int err;
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .rw = rw,
                .encrypted_page = NULL,
        };

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return read_mapping_page(mapping, index, NULL);

        page = grab_cache_page(mapping, index);
        if (!page)
                return ERR_PTR(-ENOMEM);

        if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                dn.data_blkaddr = ei.blk + index - ei.fofs;
                goto got_it;
        }

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        f2fs_put_dnode(&dn);

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-ENOENT);
        }
got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }

        /*
         * A new dentry page is allocated but not able to be written, since its
         * new inode page couldn't be allocated due to -ENOSPC.
         * In such a case, its blkaddr can remain NEW_ADDR.
         * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
         */
        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                unlock_page(page);
                return page;
        }

        fio.blk_addr = dn.data_blkaddr;
        fio.page = page;
        err = f2fs_submit_page_bio(&fio);
        if (err)
                return ERR_PTR(err);
        return page;
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);

        page = get_read_data_page(inode, index, READ_SYNC);
        if (IS_ERR(page))
                return page;

        if (PageUptodate(page))
                return page;

        wait_on_page_locked(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 0);
                return ERR_PTR(-EIO);
        }
        return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) need to know whether this page exists
 * or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
repeat:
        page = get_read_data_page(inode, index, READ_SYNC);
        if (IS_ERR(page))
                return page;

        /* wait for read completion */
        lock_page(page);
        if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
                return ERR_PTR(-EIO);
        }
        if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
        return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
                struct page *ipage, pgoff_t index, bool new_i_size)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;
repeat:
        page = grab_cache_page(mapping, index);
        if (!page)
                return ERR_PTR(-ENOMEM);

        set_new_dnode(&dn, inode, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, index);
        if (err) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
        }
        if (!ipage)
                f2fs_put_dnode(&dn);

        if (PageUptodate(page))
                goto got_it;

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
        } else {
                f2fs_put_page(page, 1);

                page = get_read_data_page(inode, index, READ_SYNC);
                if (IS_ERR(page))
                        goto repeat;

                /* wait for read completion */
                lock_page(page);
        }
got_it:
        if (new_i_size &&
                i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
                i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
                /* Only the directory inode sets new_i_size */
                set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
        }
        return page;
}

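/*
 * Allocate an on-disk block for a position currently holding
 * NULL_ADDR/NEW_ADDR.  The first block addressed directly from the inode
 * page is steered to the CURSEG_DIRECT_IO log; everything else goes to
 * the warm data log.  The new block also pushes i_size forward when it
 * lands past the current end of file.
 */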
static int __allocate_data_block(struct dnode_of_data *dn)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        int seg = CURSEG_WARM_DATA;
        pgoff_t fofs;

        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
                return -EPERM;

        dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
        if (dn->data_blkaddr == NEW_ADDR)
                goto alloc;

        if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
                return -ENOSPC;

alloc:
        get_node_info(sbi, dn->nid, &ni);
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

        if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
                seg = CURSEG_DIRECT_IO;

        allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
                                                                &sum, seg);
        set_data_blkaddr(dn);

        /* update i_size */
        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
                                                        dn->ofs_in_node;
        if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
                i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

        /* direct IO doesn't use the extent cache, to maximize performance */
        f2fs_drop_largest_extent(dn->inode, fofs);

        return 0;
}

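/*
 * Preallocate the blocks covered by a direct write so that
 * blockdev_direct_IO() can map them without taking f2fs_lock_op() from
 * its get_block callback.  Failures are deliberately ignored here; the
 * actual IO path will notice any remaining holes via get_data_block().
 */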
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
                                                        size_t count)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        u64 start = F2FS_BYTES_TO_BLK(offset);
        u64 len = F2FS_BYTES_TO_BLK(count);
        bool allocated;
        u64 end_offset;

        while (len) {
                f2fs_balance_fs(sbi);
                f2fs_lock_op(sbi);

                /* When reading holes, we need its node page */
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                if (get_dnode_of_data(&dn, start, ALLOC_NODE))
                        goto out;

                allocated = false;
                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

                while (dn.ofs_in_node < end_offset && len) {
                        block_t blkaddr;

                        blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
                        if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
                                if (__allocate_data_block(&dn))
                                        goto sync_out;
                                allocated = true;
                        }
                        len--;
                        start++;
                        dn.ofs_in_node++;
                }

                if (allocated)
                        sync_inode_page(&dn);

                f2fs_put_dnode(&dn);
                f2fs_unlock_op(sbi);
        }
        return;

sync_out:
        if (allocated)
                sync_inode_page(&dn);
        f2fs_put_dnode(&dn);
out:
        f2fs_unlock_op(sbi);
        return;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO via the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
                                                int create, bool fiemap)
{
        unsigned int maxblocks = map->m_len;
        struct dnode_of_data dn;
        int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
        pgoff_t pgofs, end_offset;
        int err = 0, ofs = 1;
        struct extent_info ei;
        bool allocated = false;

        map->m_len = 0;
        map->m_flags = 0;

        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;

        if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                goto out;
        }

        if (create)
                f2fs_lock_op(F2FS_I_SB(inode));

        /* When reading holes, we need its node page */
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
                if (err == -ENOENT)
                        err = 0;
                goto unlock_out;
        }
        if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                goto put_out;

        if (dn.data_blkaddr != NULL_ADDR) {
                map->m_flags = F2FS_MAP_MAPPED;
                map->m_pblk = dn.data_blkaddr;
                if (dn.data_blkaddr == NEW_ADDR)
                        map->m_flags |= F2FS_MAP_UNWRITTEN;
        } else if (create) {
                err = __allocate_data_block(&dn);
                if (err)
                        goto put_out;
                allocated = true;
                map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
                map->m_pblk = dn.data_blkaddr;
        } else {
                goto put_out;
        }

        end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
        map->m_len = 1;
        dn.ofs_in_node++;
        pgofs++;

get_next:
        if (dn.ofs_in_node >= end_offset) {
                if (allocated)
                        sync_inode_page(&dn);
                allocated = false;
                f2fs_put_dnode(&dn);

                set_new_dnode(&dn, inode, NULL, NULL, 0);
                err = get_dnode_of_data(&dn, pgofs, mode);
                if (err) {
                        if (err == -ENOENT)
                                err = 0;
                        goto unlock_out;
                }
                if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                        goto put_out;

                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
        }

        if (maxblocks > map->m_len) {
                block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
                if (blkaddr == NULL_ADDR && create) {
                        err = __allocate_data_block(&dn);
                        if (err)
                                goto sync_out;
                        allocated = true;
                        map->m_flags |= F2FS_MAP_NEW;
                        blkaddr = dn.data_blkaddr;
                }
                /* Give more consecutive addresses for readahead */
                if ((map->m_pblk != NEW_ADDR &&
                                blkaddr == (map->m_pblk + ofs)) ||
                                (map->m_pblk == NEW_ADDR &&
                                blkaddr == NEW_ADDR)) {
                        ofs++;
                        dn.ofs_in_node++;
                        pgofs++;
                        map->m_len++;
                        goto get_next;
                }
        }
sync_out:
        if (allocated)
                sync_inode_page(&dn);
put_out:
        f2fs_put_dnode(&dn);
unlock_out:
        if (create)
                f2fs_unlock_op(F2FS_I_SB(inode));
out:
        trace_f2fs_map_blocks(inode, map, err);
        return err;
}

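/*
 * Bridge between f2fs_map_blocks() and the buffer_head based callers
 * (bmap, fiemap, blockdev_direct_IO).  The F2FS_MAP_* flags are defined
 * to line up with the BH_* state bits, so the lookup result can be
 * copied straight into bh->b_state.  Roughly:
 *
 *      map.m_lblk <- iblock             (logical start block)
 *      map.m_pblk -> map_bh()           (physical start block)
 *      map.m_len  -> bh->b_size         (extent length, in bytes)
 */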
static int __get_data_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh, int create, bool fiemap)
{
        struct f2fs_map_blocks map;
        int ret;

        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;

        ret = f2fs_map_blocks(inode, &map, create, fiemap);
        if (!ret) {
                map_bh(bh, inode->i_sb, map.m_pblk);
                bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
                bh->b_size = map.m_len << inode->i_blkbits;
        }
        return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        return __get_data_block(inode, iblock, bh_result, create, true);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
        return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
        return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
        struct buffer_head map_bh;
        sector_t start_blk, last_blk;
        loff_t isize = i_size_read(inode);
        u64 logical = 0, phys = 0, size = 0;
        u32 flags = 0;
        bool past_eof = false, whole_file = false;
        int ret = 0;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        if (len >= isize) {
                whole_file = true;
                len = isize;
        }

        if (logical_to_blk(inode, len) == 0)
                len = blk_to_logical(inode, 1);

        start_blk = logical_to_blk(inode, start);
        last_blk = logical_to_blk(inode, start + len - 1);
next:
        memset(&map_bh, 0, sizeof(struct buffer_head));
        map_bh.b_size = len;

        ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
        if (ret)
                goto out;

        /* HOLE */
        if (!buffer_mapped(&map_bh)) {
                start_blk++;

                if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
                        past_eof = 1;

                if (past_eof && size) {
                        flags |= FIEMAP_EXTENT_LAST;
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                } else if (size) {
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                        size = 0;
                }

                /* if we have holes up to/past EOF then we're done */
                if (start_blk > last_blk || past_eof || ret)
                        goto out;
        } else {
                if (start_blk > last_blk && !whole_file) {
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                        goto out;
                }

                /*
                 * if size != 0 then we know we already have an extent
                 * to add, so add it.
                 */
                if (size) {
                        ret = fiemap_fill_next_extent(fieinfo, logical,
                                        phys, size, flags);
                        if (ret)
                                goto out;
                }

                logical = blk_to_logical(inode, start_blk);
                phys = blk_to_logical(inode, map_bh.b_blocknr);
                size = map_bh.b_size;
                flags = 0;
                if (buffer_unwritten(&map_bh))
                        flags = FIEMAP_EXTENT_UNWRITTEN;

                start_blk += logical_to_blk(inode, size);

                /*
                 * If we are past the EOF, then we need to make sure as
                 * soon as we find a hole that the last extent we found
                 * is marked with FIEMAP_EXTENT_LAST
                 */
                if (!past_eof && logical + size >= isize)
                        past_eof = true;
        }
        cond_resched();
        if (fatal_signal_pending(current))
                ret = -EINTR;
        else
                goto next;
out:
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that it assumes block_size == page_size, which is the
 * default in f2fs.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
                        struct list_head *pages, struct page *page,
                        unsigned nr_pages)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;
        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t block_nr;
        struct block_device *bdev = inode->i_sb->s_bdev;
        struct f2fs_map_blocks map;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

        for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

                prefetchw(&page->flags);
                if (pages) {
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping,
                                                  page->index, GFP_KERNEL))
                                goto next_page;
                }

                block_in_file = (sector_t)page->index;
                last_block = block_in_file + nr_pages;
                last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
                                                                blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & F2FS_MAP_MAPPED) &&
                                block_in_file > map.m_lblk &&
                                block_in_file < (map.m_lblk + map.m_len))
                        goto got_it;

                /*
                 * Then do more f2fs_map_blocks() calls until we are
                 * done with this page.
                 */
                map.m_flags = 0;

                if (block_in_file < last_block) {
                        map.m_lblk = block_in_file;
                        map.m_len = last_block - block_in_file;

                        if (f2fs_map_blocks(inode, &map, 0, false))
                                goto set_error_page;
                }
got_it:
                if ((map.m_flags & F2FS_MAP_MAPPED)) {
                        block_nr = map.m_pblk + block_in_file - map.m_lblk;
                        SetPageMappedToDisk(page);

                        if (!PageUptodate(page) && !cleancache_get_page(page)) {
                                SetPageUptodate(page);
                                goto confused;
                        }
                } else {
                        zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                        SetPageUptodate(page);
                        unlock_page(page);
                        goto next_page;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        struct f2fs_crypto_ctx *ctx = NULL;

                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
                                struct page *cpage;

                                ctx = f2fs_get_crypto_ctx(inode);
                                if (IS_ERR(ctx))
                                        goto set_error_page;

                                /* wait for the page to be moved by cleaning */
                                cpage = find_lock_page(
                                                META_MAPPING(F2FS_I_SB(inode)),
                                                block_nr);
                                if (cpage) {
                                        f2fs_wait_on_page_writeback(cpage,
                                                        DATA);
                                        f2fs_put_page(cpage, 1);
                                }
                        }

                        bio = bio_alloc(GFP_KERNEL,
                                min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
                        if (!bio) {
                                if (ctx)
                                        f2fs_release_crypto_ctx(ctx);
                                goto set_error_page;
                        }
                        bio->bi_bdev = bdev;
                        bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
                        bio->bi_end_io = f2fs_read_end_io;
                        bio->bi_private = ctx;
                }

                if (bio_add_page(bio, page, blocksize, 0) < blocksize)
                        goto submit_and_realloc;

                last_block_in_bio = block_nr;
                goto next_page;
set_error_page:
                SetPageError(page);
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                unlock_page(page);
                goto next_page;
confused:
                if (bio) {
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                unlock_page(page);
next_page:
                if (pages)
                        page_cache_release(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
                submit_bio(READ, bio);
        return 0;
}

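/*
 * ->readpage hook.  Inline files are served straight from the inode
 * page; f2fs_read_inline_data() returns -EAGAIN when the data turns out
 * not to be inline, in which case we fall back to the mpage path above.
 */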
static int f2fs_read_data_page(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        int ret = -EAGAIN;

        trace_f2fs_readpage(page, DATA);

        /* If the file has inline data, try to read it directly */
        if (f2fs_has_inline_data(inode))
                ret = f2fs_read_inline_data(inode, page);
        if (ret == -EAGAIN)
                ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
        return ret;
}

static int f2fs_read_data_pages(struct file *file,
                        struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = file->f_mapping->host;

        /* If the file has inline data, skip readpages */
        if (f2fs_has_inline_data(inode))
                return 0;

        return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

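/*
 * Write one data page, choosing between in-place update (IPU) and
 * out-of-place update (OPU).  IPU rewrites the existing block address,
 * which is preferred when free segments are scarce enough that SSR would
 * kick in; OPU allocates a new block, following the normal log-structured
 * path, and refreshes the extent cache with the new address.
 */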
int do_write_data_page(struct f2fs_io_info *fio)
{
        struct page *page = fio->page;
        struct inode *inode = page->mapping->host;
        struct dnode_of_data dn;
        int err = 0;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
        if (err)
                return err;

        fio->blk_addr = dn.data_blkaddr;

        /* This page is already truncated */
        if (fio->blk_addr == NULL_ADDR) {
                ClearPageUptodate(page);
                goto out_writepage;
        }

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
                fio->encrypted_page = f2fs_encrypt(inode, fio->page);
                if (IS_ERR(fio->encrypted_page)) {
                        err = PTR_ERR(fio->encrypted_page);
                        goto out_writepage;
                }
        }

        set_page_writeback(page);

        /*
         * If the current allocation needs SSR,
         * it had better do in-place writes for updated data.
         */
        if (unlikely(fio->blk_addr != NEW_ADDR &&
                        !is_cold_data(page) &&
                        need_inplace_update(inode))) {
                rewrite_data_page(fio);
                set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
                trace_f2fs_do_write_data_page(page, IPU);
        } else {
                write_data_page(&dn, fio);
                set_data_blkaddr(&dn);
                f2fs_update_extent_cache(&dn);
                trace_f2fs_do_write_data_page(page, OPU);
                set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
                if (page->index == 0)
                        set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
        }
out_writepage:
        f2fs_put_dnode(&dn);
        return err;
}

static int f2fs_write_data_page(struct page *page,
                                        struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = ((unsigned long long) i_size)
                                                        >> PAGE_CACHE_SHIFT;
        unsigned offset = 0;
        bool need_balance_fs = false;
        int err = 0;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = DATA,
                .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
                .page = page,
                .encrypted_page = NULL,
        };

        trace_f2fs_writepage(page, DATA);

        if (page->index < end_index)
                goto write;

        /*
         * If the offset is out-of-range of file size,
         * this page does not have to be written to disk.
         */
        offset = i_size & (PAGE_CACHE_SIZE - 1);
        if ((page->index >= end_index + 1) || !offset)
                goto out;

        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
        if (f2fs_is_drop_cache(inode))
                goto out;
        if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
                        available_free_memory(sbi, BASE_CHECK))
                goto redirty_out;

        /* Dentry blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode)) {
                if (unlikely(f2fs_cp_error(sbi)))
                        goto redirty_out;
                err = do_write_data_page(&fio);
                goto done;
        }

        /* we should bypass data pages to proceed the kworker jobs */
        if (unlikely(f2fs_cp_error(sbi))) {
                SetPageError(page);
                goto out;
        }

        if (!wbc->for_reclaim)
                need_balance_fs = true;
        else if (has_not_enough_free_secs(sbi, 0))
                goto redirty_out;

        err = -EAGAIN;
        f2fs_lock_op(sbi);
        if (f2fs_has_inline_data(inode))
                err = f2fs_write_inline_data(inode, page);
        if (err == -EAGAIN)
                err = do_write_data_page(&fio);
        f2fs_unlock_op(sbi);
done:
        if (err && err != -ENOENT)
                goto redirty_out;

        clear_cold_data(page);
out:
        inode_dec_dirty_pages(inode);
        if (err)
                ClearPageUptodate(page);
        unlock_page(page);
        if (need_balance_fs)
                f2fs_balance_fs(sbi);
        if (wbc->for_reclaim)
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
        return 0;

redirty_out:
        redirty_page_for_writepage(wbc, page);
        return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
                        void *data)
{
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);
        mapping_set_error(mapping, ret);
        return ret;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
                        struct writeback_control *wbc, writepage_t writepage,
                        void *data)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        int tag;
        int step = 0;

        pagevec_init(&pvec, 0);
next:
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;
retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                int i;

                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end) {
                                done = 1;
                                break;
                        }

                        done_index = page->index;

                        lock_page(page);

                        if (unlikely(page->mapping != mapping)) {
continue_unlock:
                                unlock_page(page);
                                continue;
                        }

                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }

                        if (step == 0 && !is_cold_data(page))
                                goto continue_unlock;
                        if (step == 1 && is_cold_data(page))
                                goto continue_unlock;

                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
                                        f2fs_wait_on_page_writeback(page, DATA);
                                else
                                        goto continue_unlock;
                        }

                        BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;

                        ret = (*writepage)(page, wbc, data);
                        if (unlikely(ret)) {
                                if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                        unlock_page(page);
                                        ret = 0;
                                } else {
                                        done_index = page->index + 1;
                                        done = 1;
                                        break;
                                }
                        }

                        if (--wbc->nr_to_write <= 0 &&
                            wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (step < 1) {
                step++;
                goto next;
        }

        if (!cycled && !done) {
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}

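/*
 * ->writepages hook.  Writeback is skipped entirely while recovery is in
 * progress (SBI_POR_DOING), and small sets of dirty dentry pages are left
 * for the checkpoint to flush instead.  nr_pages_to_write() and the diff
 * adjustment below cap how much work a single flusher pass may do.
 */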
static int f2fs_write_data_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        bool locked = false;
        int ret;
        long diff;

        trace_f2fs_writepages(mapping->host, wbc, DATA);

        /* deal with chardevs and other special files */
        if (!mapping->a_ops->writepage)
                return 0;

        if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
                        get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
                        available_free_memory(sbi, DIRTY_DENTS))
                goto skip_write;

        /* during POR, we don't need to trigger writepage at all. */
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto skip_write;

        diff = nr_pages_to_write(sbi, DATA, wbc);

        if (!S_ISDIR(inode->i_mode)) {
                mutex_lock(&sbi->writepages);
                locked = true;
        }
        ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
        if (locked)
                mutex_unlock(&sbi->writepages);

        remove_dirty_dir_inode(inode);

        wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
        return ret;

skip_write:
        wbc->pages_skipped += get_dirty_pages(inode);
        return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                truncate_blocks(inode, inode->i_size, true);
        }
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page, *ipage;
        pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
        struct dnode_of_data dn;
        int err = 0;

        trace_f2fs_write_begin(inode, pos, len, flags);

        f2fs_balance_fs(sbi);

        /*
         * We should check this at this moment to avoid deadlock on inode page
         * and #0 page. The locking rule for inline_data conversion should be:
         * lock_page(page #0) -> lock_page(inode_page)
         */
        if (index != 0) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        goto fail;
        }
repeat:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                err = -ENOMEM;
                goto fail;
        }

        *pagep = page;

        f2fs_lock_op(sbi);

        /* check inline_data */
        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto unlock_fail;
        }

        set_new_dnode(&dn, inode, ipage, ipage, 0);

        if (f2fs_has_inline_data(inode)) {
                if (pos + len <= MAX_INLINE_DATA) {
                        read_inline_data(page, ipage);
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
                        sync_inode_page(&dn);
                        goto put_next;
                }
                err = f2fs_convert_inline_page(&dn, page);
                if (err)
                        goto put_fail;
        }
        err = f2fs_reserve_block(&dn, index);
        if (err)
                goto put_fail;
put_next:
        f2fs_put_dnode(&dn);
        f2fs_unlock_op(sbi);

        if (len == PAGE_CACHE_SIZE)
                goto out_update;
        if (PageUptodate(page))
                goto out_clear;

        f2fs_wait_on_page_writeback(page, DATA);

        if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
                unsigned start = pos & (PAGE_CACHE_SIZE - 1);
                unsigned end = start + len;

                /* Reading beyond i_size is simple: memset to zero */
                zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
                goto out_update;
        }

        if (dn.data_blkaddr == NEW_ADDR) {
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = sbi,
                        .type = DATA,
                        .rw = READ_SYNC,
                        .blk_addr = dn.data_blkaddr,
                        .page = page,
                        .encrypted_page = NULL,
                };
                err = f2fs_submit_page_bio(&fio);
                if (err)
                        goto fail;

                lock_page(page);
                if (unlikely(!PageUptodate(page))) {
                        f2fs_put_page(page, 1);
                        err = -EIO;
                        goto fail;
                }
                if (unlikely(page->mapping != mapping)) {
                        f2fs_put_page(page, 1);
                        goto repeat;
                }

                /* avoid symlink page */
                if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
                        err = f2fs_decrypt_one(inode, page);
                        if (err) {
                                f2fs_put_page(page, 1);
                                goto fail;
                        }
                }
        }
out_update:
        SetPageUptodate(page);
out_clear:
        clear_cold_data(page);
        return 0;

put_fail:
        f2fs_put_dnode(&dn);
unlock_fail:
        f2fs_unlock_op(sbi);
        f2fs_put_page(page, 1);
fail:
        f2fs_write_failed(mapping, pos + len);
        return err;
}

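/*
 * ->write_end hook.  The page was prepared (and read, if partially
 * overwritten) by f2fs_write_begin(); here it only needs to be dirtied,
 * and i_size pushed forward when the copy extended the file.
 */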
static int f2fs_write_end(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;

        trace_f2fs_write_end(inode, pos, len, copied);

        set_page_dirty(page);

        if (pos + copied > i_size_read(inode)) {
                i_size_write(inode, pos + copied);
                mark_inode_dirty(inode);
                update_inode_page(inode);
        }

        f2fs_put_page(page, 1);
        return copied;
}

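/*
 * Direct IO writes must be block aligned.  With the default 4KB blocks,
 * for example, a write of 8192 bytes at offset 4096 qualifies, while one
 * at offset 512 does not; in the latter case f2fs_direct_IO() returns 0
 * and the caller falls back to buffered IO.
 */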
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
                           loff_t offset)
{
        unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

        if (iov_iter_rw(iter) == READ)
                return 0;

        if (offset & blocksize_mask)
                return -EINVAL;

        if (iov_iter_alignment(iter) & blocksize_mask)
                return -EINVAL;

        return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t offset)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        int err;

        /* we don't need to use inline_data strictly */
        if (f2fs_has_inline_data(inode)) {
                err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }

        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return 0;

        if (check_direct_IO(inode, iter, offset))
                return 0;

        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

        if (iov_iter_rw(iter) == WRITE)
                __allocate_data_blocks(inode, offset, count);

        err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
        if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);

        trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

        return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
                          unsigned int length)
{
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

        if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
                (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
                return;

        if (PageDirty(page)) {
                if (inode->i_ino == F2FS_META_INO(sbi))
                        dec_page_count(sbi, F2FS_DIRTY_META);
                else if (inode->i_ino == F2FS_NODE_INO(sbi))
                        dec_page_count(sbi, F2FS_DIRTY_NODES);
                else
                        inode_dec_dirty_pages(inode);
        }
        ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
        /* If this is a dirty page, keep PagePrivate */
        if (PageDirty(page))
                return 0;

        ClearPagePrivate(page);
        return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;

        trace_f2fs_set_page_dirty(page, DATA);

        SetPageUptodate(page);

        if (f2fs_is_atomic_file(inode)) {
                register_inmem_page(inode, page);
                return 1;
        }

        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                update_dirty_page(inode, page);
                return 1;
        }
        return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;

        /* we don't need to use inline_data strictly */
        if (f2fs_has_inline_data(inode)) {
                int err = f2fs_convert_inline_inode(inode);
                if (err)
                        return err;
        }
        return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,
        .writepage      = f2fs_write_data_page,
        .writepages     = f2fs_write_data_pages,
        .write_begin    = f2fs_write_begin,
        .write_end      = f2fs_write_end,
        .set_page_dirty = f2fs_set_data_page_dirty,
        .invalidatepage = f2fs_invalidate_page,
        .releasepage    = f2fs_release_page,
        .direct_IO      = f2fs_direct_IO,
        .bmap           = f2fs_bmap,
};