f2fs: add a f2fs_ prefix to punch_hole() and expand_inode_data()
[linux-block.git] / fs / f2fs / file.c
CommitLineData
7c1a000d 1// SPDX-License-Identifier: GPL-2.0
0a8165d7 2/*
fbfa2cc5
JK
3 * fs/f2fs/file.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
fbfa2cc5
JK
7 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/stat.h>
11#include <linux/buffer_head.h>
12#include <linux/writeback.h>
ae51fb31 13#include <linux/blkdev.h>
fbfa2cc5
JK
14#include <linux/falloc.h>
15#include <linux/types.h>
e9750824 16#include <linux/compat.h>
fbfa2cc5
JK
17#include <linux/uaccess.h>
18#include <linux/mount.h>
7f7670fe 19#include <linux/pagevec.h>
dc91de78 20#include <linux/uio.h>
8da4b8c4 21#include <linux/uuid.h>
4dd6f977 22#include <linux/file.h>
4507847c 23#include <linux/nls.h>
9af84648 24#include <linux/sched/signal.h>
9b1bb01c 25#include <linux/fileattr.h>
0f6b56ec 26#include <linux/fadvise.h>
a1e09b03 27#include <linux/iomap.h>
fbfa2cc5
JK
28
29#include "f2fs.h"
30#include "node.h"
31#include "segment.h"
32#include "xattr.h"
33#include "acl.h"
c1c1b583 34#include "gc.h"
52118743 35#include "iostat.h"
a2a4a7e4 36#include <trace/events/f2fs.h>
fa4320ce 37#include <uapi/linux/f2fs.h>
fbfa2cc5 38
ea4d479b 39static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
5a3a2d83
QS
40{
41 struct inode *inode = file_inode(vmf->vma->vm_file);
ea4d479b 42 vm_fault_t ret;
5a3a2d83 43
ea4d479b 44 ret = filemap_fault(vmf);
8b83ac81 45 if (!ret)
34a23525
CY
46 f2fs_update_iostat(F2FS_I_SB(inode), inode,
47 APP_MAPPED_READ_IO, F2FS_BLKSIZE);
8b83ac81 48
d7648343
CY
49 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50
ea4d479b 51 return ret;
5a3a2d83
QS
52}
53
/*
 * Write-fault handler (->page_mkwrite): make a page-cache page writable.
 *
 * Rejects immutable inodes and inodes whose compressed blocks have been
 * released, converts inline data, and (unless the page sits in a compressed
 * cluster) allocates the backing block.  Zeroes the tail of a page that
 * straddles EOF before dirtying it.  Errors are mapped to a VM fault code
 * via block_page_mkwrite_return().
 */
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			/* page lives in a compressed cluster: no new block */
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);
	lock_page(page);
	/* page may have been truncated or invalidated while unlocked */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block_locked(&dn, page->index);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		/* only look up the existing block address */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
166
/* VM callbacks installed on f2fs file mappings by f2fs_file_mmap(). */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
172
354a3399
JK
173static int get_parent_ino(struct inode *inode, nid_t *pino)
174{
175 struct dentry *dentry;
176
84c9c2de
EB
177 /*
178 * Make sure to get the non-deleted alias. The alias associated with
179 * the open file descriptor being fsync()'ed may be deleted already.
180 */
181 dentry = d_find_alias(inode);
354a3399
JK
182 if (!dentry)
183 return 0;
184
f0947e5c
JK
185 *pino = parent_ino(dentry);
186 dput(dentry);
354a3399
JK
187 return 1;
188}
189
a5fd5050 190static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
9d1589ef 191{
4081363f 192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
a5fd5050 193 enum cp_reason_type cp_reason = CP_NO_NEEDED;
9d1589ef 194
a5fd5050
CY
195 if (!S_ISREG(inode->i_mode))
196 cp_reason = CP_NON_REGULAR;
4c8ff709
CY
197 else if (f2fs_compressed_file(inode))
198 cp_reason = CP_COMPRESSED;
a5fd5050
CY
199 else if (inode->i_nlink != 1)
200 cp_reason = CP_HARDLINK;
bbf156f7 201 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
a5fd5050 202 cp_reason = CP_SB_NEED_CP;
9d1589ef 203 else if (file_wrong_pino(inode))
a5fd5050 204 cp_reason = CP_WRONG_PINO;
4d57b86d 205 else if (!f2fs_space_for_roll_forward(sbi))
a5fd5050 206 cp_reason = CP_NO_SPC_ROLL;
4d57b86d 207 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
a5fd5050 208 cp_reason = CP_NODE_NEED_CP;
d5053a34 209 else if (test_opt(sbi, FASTBOOT))
a5fd5050 210 cp_reason = CP_FASTBOOT_MODE;
63189b78 211 else if (F2FS_OPTION(sbi).active_logs == 2)
a5fd5050 212 cp_reason = CP_SPEC_LOG_NUM;
63189b78 213 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
4d57b86d
CY
214 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
215 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
216 TRANS_DIR_INO))
0a007b97 217 cp_reason = CP_RECOVER_DIR;
9d1589ef 218
a5fd5050 219 return cp_reason;
9d1589ef
CY
220}
221
9c7bb702
CL
222static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
223{
224 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
225 bool ret = false;
226 /* But we need to avoid that there are some inode updates */
4d57b86d 227 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
9c7bb702
CL
228 ret = true;
229 f2fs_put_page(i, 0);
230 return ret;
231}
232
51455b19
CL
/*
 * Repair a stale parent-inode number after a checkpoint secured consistency.
 * Only applies when the file has a single link and a live dentry alias from
 * which the real parent can be derived.  The whole check-and-write runs
 * under i_sem so the pino and its "got" state stay coherent.
 */
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	f2fs_down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	f2fs_up_write(&fi->i_sem);
}
246
608514de
JK
/*
 * Core fsync/fdatasync implementation.
 *
 * Fast paths: read-only fs returns 0; a clean inode with no written data
 * may skip straight to flush/out.  Otherwise data is written back, and
 * either a full checkpoint is issued (when need_do_checkpoint() finds a
 * reason) or the inode's node pages are fsynced for roll-forward recovery,
 * looping until no further inode-block update is pending.  @atomic forces
 * strictly ordered node writes (also enforced for FSYNC_MODE_STRICT).
 */
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	} else {
		/*
		 * for OPU case, during fsync(), node can be persisted before
		 * data when lower device doesn't support write barrier, result
		 * in data corruption after SPO.
		 * So for strict fsync mode, force to use atomic write semantics
		 * to keep write order in between data/node and last node to
		 * avoid potential data corruption.
		 */
		if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
			atomic = true;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	f2fs_down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	f2fs_up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
	    (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}
386
608514de
JK
387int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
388{
1f227a3e
JK
389 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
390 return -EIO;
608514de
JK
391 return f2fs_do_sync_file(file, start, end, datasync, false);
392}
393
4cb03fec
MWO
394static bool __found_offset(struct address_space *mapping, block_t blkaddr,
395 pgoff_t index, int whence)
7f7670fe
JK
396{
397 switch (whence) {
398 case SEEK_DATA:
4cb03fec
MWO
399 if (__is_valid_data_blkaddr(blkaddr))
400 return true;
401 if (blkaddr == NEW_ADDR &&
402 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
7f7670fe
JK
403 return true;
404 break;
405 case SEEK_HOLE:
406 if (blkaddr == NULL_ADDR)
407 return true;
408 break;
409 }
410 return false;
411}
412
267378d4
CY
/*
 * Implement SEEK_DATA/SEEK_HOLE under inode_lock.
 *
 * Inline-data inodes are fully resident, so data is at @offset and the
 * only hole is at EOF.  Otherwise walk dnode blocks from @offset, scanning
 * each block's address entries with __found_offset().  Returns the new
 * file position via vfs_setpos(), or -ENXIO when nothing matches.
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				/* skip the whole missing node block */
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			/* a corrupted block address aborts the whole seek */
			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	/* SEEK_HOLE past the scanned range lands at EOF */
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
494
495static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
496{
497 struct inode *inode = file->f_mapping->host;
498 loff_t maxbytes = inode->i_sb->s_maxbytes;
499
6d1451bf
CX
500 if (f2fs_compressed_file(inode))
501 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
502
267378d4
CY
503 switch (whence) {
504 case SEEK_SET:
505 case SEEK_CUR:
506 case SEEK_END:
507 return generic_file_llseek_size(file, offset, whence,
508 maxbytes, i_size_read(inode));
509 case SEEK_DATA:
510 case SEEK_HOLE:
0b4c5afd
JK
511 if (offset < 0)
512 return -ENXIO;
267378d4
CY
513 return f2fs_seek_block(file, offset, whence);
514 }
515
516 return -EINVAL;
517}
518
fbfa2cc5
JK
519static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
520{
b3d208f9
JK
521 struct inode *inode = file_inode(file);
522
1f227a3e
JK
523 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
524 return -EIO;
525
4c8ff709
CY
526 if (!f2fs_is_compress_backend_ready(inode))
527 return -EOPNOTSUPP;
528
fbfa2cc5
JK
529 file_accessed(file);
530 vma->vm_ops = &f2fs_file_vm_ops;
4c8ff709 531 set_inode_flag(inode, FI_MMAP_FILE);
fbfa2cc5
JK
532 return 0;
533}
534
fcc85a4d
JK
535static int f2fs_file_open(struct inode *inode, struct file *filp)
536{
2e168c82 537 int err = fscrypt_file_open(inode, filp);
fcc85a4d 538
95ae251f
EB
539 if (err)
540 return err;
541
4c8ff709
CY
542 if (!f2fs_is_compress_backend_ready(inode))
543 return -EOPNOTSUPP;
544
95ae251f 545 err = fsverity_file_open(inode, filp);
2e168c82
EB
546 if (err)
547 return err;
b91050a8
HL
548
549 filp->f_mode |= FMODE_NOWAIT;
550
0abd675e 551 return dquot_file_open(inode, filp);
fcc85a4d
JK
552}
553
/*
 * Invalidate @count block addresses starting at dn->ofs_in_node in the
 * given node page, releasing the blocks and updating the extent caches
 * and the inode's valid-block count.  For compressed files the loop also
 * tracks per-cluster valid blocks so i_compr_blocks stays accurate.
 */
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		/* at each cluster boundary, flush the previous cluster's tally */
		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			/* skip corrupted addresses instead of freeing them */
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		/* released compress blocks hold no quota for COMPRESS_ADDR */
		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	/* flush the tally of the final (possibly partial) cluster */
	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
		f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}
629
/* Truncate every data block addressed by this node page. */
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
634
/*
 * Zero the tail of the page containing the new EOF (@from).
 *
 * With @cache_only, only an already-cached, uptodate page is zeroed and
 * it is not dirtied (used when the on-disk copy was truncated separately,
 * e.g. inline data); otherwise the page is read in, zeroed and dirtied.
 * Returns 0 when there is nothing to do or on success.
 */
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* page-aligned EOF needs no zeroing unless the cache must be scrubbed */
	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
668
/*
 * Free all blocks of @inode beyond byte offset @from.
 *
 * Handles inline-data inodes specially, truncates the partially covered
 * direct node with f2fs_truncate_data_blocks_range(), then drops all
 * deeper node blocks via f2fs_truncate_inode_blocks().  Finally zeroes
 * the partial tail page.  @lock selects whether f2fs_lock_op() guards
 * the node updates (callers may already hold it).
 */
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	/* first block index that must be freed */
	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		/* on-disk copy is gone; only scrub the cached tail page */
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
733
4c8ff709
CY
/*
 * Truncate blocks beyond @from, rounding the cut point up to a cluster
 * boundary for compressed files and then trimming the partial cluster
 * separately.  Truncating to zero re-enables direct writes on a file
 * whose compressed blocks had been released.
 */
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For compressed file, after release compress blocks, don't allow write
	 * direct, but we should allow write direct after truncate to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	/* trim the cluster straddling @from that was kept above */
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}
771
/*
 * Truncate @inode to its current i_size: initialize quota, convert the
 * inode out of inline form when it no longer qualifies, free the blocks
 * past i_size and update the timestamps.  Only regular files, directories
 * and symlinks are truncated; other inode types return 0.
 */
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* fault-injection hook for testing truncate failures */
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
809
bd367329 810static bool f2fs_force_buffered_io(struct inode *inode, int rw)
2db0487f
EB
811{
812 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2db0487f
EB
813
814 if (!fscrypt_dio_supported(inode))
815 return true;
816 if (fsverity_active(inode))
817 return true;
818 if (f2fs_compressed_file(inode))
819 return true;
820
821 /* disallow direct IO if any of devices has unaligned blksize */
822 if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
823 return true;
5d170fe4
LT
824 /*
825 * for blkzoned device, fallback direct IO to buffered IO, so
826 * all IOs can be serialized by log-structured write.
827 */
828 if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
829 return true;
bd367329
EB
830 if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
831 return true;
832 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
2db0487f
EB
833 return true;
834
835 return false;
836}
837
549c7297
CB
/*
 * ->getattr: fill @stat, adding f2fs-specific extras on top of
 * generic_fillattr() — creation time (STATX_BTIME) when stored in the
 * extra inode area, statx attribute flags mapped from i_flags, DIO
 * alignment info on request, and the hidden blocks backing inline
 * data/dentries.
 */
int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = NULL;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	/*
	 * Return the DIO alignment restrictions if requested. We only return
	 * this information when requested, since on encrypted files it might
	 * take a fair bit of work to get if the file wasn't opened recently.
	 *
	 * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN
	 * cannot represent that, so in that case we report no DIO support.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		unsigned int bsize = i_blocksize(inode);

		stat->result_mask |= STATX_DIOALIGN;
		if (!f2fs_force_buffered_io(inode, WRITE)) {
			stat->dio_mem_align = bsize;
			stat->dio_offset_align = bsize;
		}
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(mnt_userns, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
902
#ifdef CONFIG_F2FS_FS_POSIX_ACL
/*
 * ACL-aware variant of setattr_copy(): copy the validated attributes from
 * @attr into @inode, stripping S_ISGID on a mode change when the caller is
 * neither in the file's group nor CAP_FSETID-capable, and routing the mode
 * through set_acl_inode() so the cached ACL stays in sync.
 */
static void __setattr_copy(struct user_namespace *mnt_userns,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	i_uid_update(mnt_userns, attr, inode);
	i_gid_update(mnt_userns, attr, inode);
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

		if (!vfsgid_in_group_p(vfsgid) &&
			!capable_wrt_inode_uidgid(mnt_userns, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
/* without POSIX ACL support the generic copy is sufficient */
#define __setattr_copy setattr_copy
#endif
930
549c7297
CB
931int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
932 struct iattr *attr)
fbfa2cc5 933{
2b0143b5 934 struct inode *inode = d_inode(dentry);
fbfa2cc5
JK
935 int err;
936
1f227a3e
JK
937 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
938 return -EIO;
939
e0fcd015
CY
940 if (unlikely(IS_IMMUTABLE(inode)))
941 return -EPERM;
942
943 if (unlikely(IS_APPEND(inode) &&
944 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
945 ATTR_GID | ATTR_TIMES_SET))))
946 return -EPERM;
947
4c8ff709
CY
948 if ((attr->ia_valid & ATTR_SIZE) &&
949 !f2fs_is_compress_backend_ready(inode))
950 return -EOPNOTSUPP;
951
984fc4e7 952 err = setattr_prepare(mnt_userns, dentry, attr);
fbfa2cc5
JK
953 if (err)
954 return err;
955
20bb2479
EB
956 err = fscrypt_prepare_setattr(dentry, attr);
957 if (err)
958 return err;
959
95ae251f
EB
960 err = fsverity_prepare_setattr(dentry, attr);
961 if (err)
962 return err;
963
b27c82e1 964 if (is_quota_modification(mnt_userns, inode, attr)) {
10a26878 965 err = f2fs_dquot_initialize(inode);
0abd675e
CY
966 if (err)
967 return err;
968 }
b27c82e1
CB
969 if (i_uid_needs_update(mnt_userns, attr, inode) ||
970 i_gid_needs_update(mnt_userns, attr, inode)) {
af033b2a 971 f2fs_lock_op(F2FS_I_SB(inode));
b27c82e1 972 err = dquot_transfer(mnt_userns, inode, attr);
af033b2a
CY
973 if (err) {
974 set_sbi_flag(F2FS_I_SB(inode),
975 SBI_QUOTA_NEED_REPAIR);
976 f2fs_unlock_op(F2FS_I_SB(inode));
0abd675e 977 return err;
af033b2a
CY
978 }
979 /*
980 * update uid/gid under lock_op(), so that dquot and inode can
981 * be updated atomically.
982 */
b27c82e1
CB
983 i_uid_update(mnt_userns, attr, inode);
984 i_gid_update(mnt_userns, attr, inode);
af033b2a
CY
985 f2fs_mark_inode_dirty_sync(inode, true);
986 f2fs_unlock_op(F2FS_I_SB(inode));
0abd675e
CY
987 }
988
09db6a2e 989 if (attr->ia_valid & ATTR_SIZE) {
cfb9a34d
JK
990 loff_t old_size = i_size_read(inode);
991
992 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
993 /*
994 * should convert inline inode before i_size_write to
995 * keep smaller than inline_data size with inline flag.
996 */
997 err = f2fs_convert_inline_inode(inode);
998 if (err)
999 return err;
1000 }
a33c1502 1001
e4544b63 1002 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
edc6d01b 1003 filemap_invalidate_lock(inode->i_mapping);
a33c1502
CY
1004
1005 truncate_setsize(inode, attr->ia_size);
1006
cfb9a34d 1007 if (attr->ia_size <= old_size)
9a449e9c 1008 err = f2fs_truncate(inode);
a33c1502
CY
1009 /*
1010 * do not trim all blocks after i_size if target size is
1011 * larger than i_size.
1012 */
edc6d01b 1013 filemap_invalidate_unlock(inode->i_mapping);
e4544b63 1014 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
a33c1502
CY
1015 if (err)
1016 return err;
0cab80ee 1017
c10c9820 1018 spin_lock(&F2FS_I(inode)->i_size_lock);
cfb9a34d 1019 inode->i_mtime = inode->i_ctime = current_time(inode);
a0d00fad 1020 F2FS_I(inode)->last_disk_size = i_size_read(inode);
c10c9820 1021 spin_unlock(&F2FS_I(inode)->i_size_lock);
fbfa2cc5
JK
1022 }
1023
984fc4e7 1024 __setattr_copy(mnt_userns, inode, attr);
fbfa2cc5
JK
1025
1026 if (attr->ia_valid & ATTR_MODE) {
138060ba 1027 err = posix_acl_chmod(mnt_userns, dentry, f2fs_get_inode_mode(inode));
17232e83
CY
1028
1029 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
1030 if (!err)
1031 inode->i_mode = F2FS_I(inode)->i_acl_mode;
91942321 1032 clear_inode_flag(inode, FI_ACL_MODE);
fbfa2cc5
JK
1033 }
1034 }
1035
c0ed4405 1036 /* file size may changed here */
ca597bdd 1037 f2fs_mark_inode_dirty_sync(inode, true);
15d04354
JK
1038
1039 /* inode change will produce dirty node pages flushed by checkpoint */
1040 f2fs_balance_fs(F2FS_I_SB(inode), true);
1041
fbfa2cc5
JK
1042 return err;
1043}
1044
1045const struct inode_operations f2fs_file_inode_operations = {
1046 .getattr = f2fs_getattr,
1047 .setattr = f2fs_setattr,
cac2f8b8 1048 .get_inode_acl = f2fs_get_acl,
a6dda0e6 1049 .set_acl = f2fs_set_acl,
fbfa2cc5 1050 .listxattr = f2fs_listxattr,
9ab70134 1051 .fiemap = f2fs_fiemap,
9b1bb01c
MS
1052 .fileattr_get = f2fs_fileattr_get,
1053 .fileattr_set = f2fs_fileattr_set,
fbfa2cc5
JK
1054};
1055
6394328a 1056static int fill_zero(struct inode *inode, pgoff_t index,
fbfa2cc5
JK
1057 loff_t start, loff_t len)
1058{
4081363f 1059 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
fbfa2cc5
JK
1060 struct page *page;
1061
1062 if (!len)
6394328a 1063 return 0;
fbfa2cc5 1064
2c4db1a6 1065 f2fs_balance_fs(sbi, true);
bd43df02 1066
e479556b 1067 f2fs_lock_op(sbi);
4d57b86d 1068 page = f2fs_get_new_data_page(inode, NULL, index, false);
e479556b 1069 f2fs_unlock_op(sbi);
fbfa2cc5 1070
6394328a
CY
1071 if (IS_ERR(page))
1072 return PTR_ERR(page);
1073
bae0ee7a 1074 f2fs_wait_on_page_writeback(page, DATA, true, true);
6394328a
CY
1075 zero_user(page, start, len);
1076 set_page_dirty(page);
1077 f2fs_put_page(page, 1);
1078 return 0;
fbfa2cc5
JK
1079}
1080
4d57b86d 1081int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
fbfa2cc5 1082{
fbfa2cc5
JK
1083 int err;
1084
ea58711e 1085 while (pg_start < pg_end) {
fbfa2cc5 1086 struct dnode_of_data dn;
ea58711e 1087 pgoff_t end_offset, count;
9eaeba70 1088
fbfa2cc5 1089 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1090 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
fbfa2cc5 1091 if (err) {
ea58711e 1092 if (err == -ENOENT) {
4d57b86d
CY
1093 pg_start = f2fs_get_next_page_offset(&dn,
1094 pg_start);
fbfa2cc5 1095 continue;
ea58711e 1096 }
fbfa2cc5
JK
1097 return err;
1098 }
1099
81ca7350 1100 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
ea58711e
CY
1101 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1102
1103 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1104
4d57b86d 1105 f2fs_truncate_data_blocks_range(&dn, count);
fbfa2cc5 1106 f2fs_put_dnode(&dn);
ea58711e
CY
1107
1108 pg_start += count;
fbfa2cc5
JK
1109 }
1110 return 0;
1111}
1112
1cd75654 1113static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
fbfa2cc5
JK
1114{
1115 pgoff_t pg_start, pg_end;
1116 loff_t off_start, off_end;
b9d777b8 1117 int ret;
fbfa2cc5 1118
b9d777b8
JK
1119 ret = f2fs_convert_inline_inode(inode);
1120 if (ret)
1121 return ret;
9ffe0fb5 1122
09cbfeaf
KS
1123 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1124 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
fbfa2cc5 1125
09cbfeaf
KS
1126 off_start = offset & (PAGE_SIZE - 1);
1127 off_end = (offset + len) & (PAGE_SIZE - 1);
fbfa2cc5
JK
1128
1129 if (pg_start == pg_end) {
6394328a 1130 ret = fill_zero(inode, pg_start, off_start,
fbfa2cc5 1131 off_end - off_start);
6394328a
CY
1132 if (ret)
1133 return ret;
fbfa2cc5 1134 } else {
6394328a
CY
1135 if (off_start) {
1136 ret = fill_zero(inode, pg_start++, off_start,
09cbfeaf 1137 PAGE_SIZE - off_start);
6394328a
CY
1138 if (ret)
1139 return ret;
1140 }
1141 if (off_end) {
1142 ret = fill_zero(inode, pg_end, 0, off_end);
1143 if (ret)
1144 return ret;
1145 }
fbfa2cc5
JK
1146
1147 if (pg_start < pg_end) {
fbfa2cc5 1148 loff_t blk_start, blk_end;
4081363f 1149 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1127a3d4 1150
2c4db1a6 1151 f2fs_balance_fs(sbi, true);
fbfa2cc5 1152
09cbfeaf
KS
1153 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1154 blk_end = (loff_t)pg_end << PAGE_SHIFT;
a33c1502 1155
e4544b63 1156 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6abaa83c 1157 filemap_invalidate_lock(inode->i_mapping);
a33c1502 1158
c8dc3047 1159 truncate_pagecache_range(inode, blk_start, blk_end - 1);
39936837 1160
e479556b 1161 f2fs_lock_op(sbi);
4d57b86d 1162 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
e479556b 1163 f2fs_unlock_op(sbi);
a33c1502 1164
6abaa83c 1165 filemap_invalidate_unlock(inode->i_mapping);
e4544b63 1166 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
fbfa2cc5
JK
1167 }
1168 }
1169
fbfa2cc5
JK
1170 return ret;
1171}
1172
0a2aa8fb
JK
1173static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1174 int *do_replace, pgoff_t off, pgoff_t len)
b4ace337
CY
1175{
1176 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1177 struct dnode_of_data dn;
0a2aa8fb 1178 int ret, done, i;
ecbaa406 1179
0a2aa8fb 1180next_dnode:
6e2c64ad 1181 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1182 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
6e2c64ad
JK
1183 if (ret && ret != -ENOENT) {
1184 return ret;
1185 } else if (ret == -ENOENT) {
0a2aa8fb
JK
1186 if (dn.max_level == 0)
1187 return -ENOENT;
4c8ff709
CY
1188 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1189 dn.ofs_in_node, len);
0a2aa8fb
JK
1190 blkaddr += done;
1191 do_replace += done;
1192 goto next;
1193 }
1194
1195 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1196 dn.ofs_in_node, len);
1197 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
a2ced1ce 1198 *blkaddr = f2fs_data_blkaddr(&dn);
93770ab7
CY
1199
1200 if (__is_valid_data_blkaddr(*blkaddr) &&
1201 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1202 DATA_GENERIC_ENHANCE)) {
1203 f2fs_put_dnode(&dn);
95fa90c9 1204 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
10f966bb 1205 return -EFSCORRUPTED;
93770ab7
CY
1206 }
1207
4d57b86d 1208 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
0a2aa8fb 1209
b0332a0f 1210 if (f2fs_lfs_mode(sbi)) {
0a2aa8fb 1211 f2fs_put_dnode(&dn);
fd114ab2 1212 return -EOPNOTSUPP;
0a2aa8fb
JK
1213 }
1214
6e2c64ad 1215 /* do not invalidate this block address */
f28b3434 1216 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
0a2aa8fb 1217 *do_replace = 1;
b4ace337 1218 }
6e2c64ad 1219 }
0a2aa8fb
JK
1220 f2fs_put_dnode(&dn);
1221next:
1222 len -= done;
1223 off += done;
1224 if (len)
1225 goto next_dnode;
1226 return 0;
1227}
b4ace337 1228
0a2aa8fb
JK
1229static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1230 int *do_replace, pgoff_t off, int len)
1231{
1232 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1233 struct dnode_of_data dn;
1234 int ret, i;
b4ace337 1235
0a2aa8fb
JK
1236 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1237 if (*do_replace == 0)
1238 continue;
b4ace337 1239
0a2aa8fb 1240 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1241 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
0a2aa8fb
JK
1242 if (ret) {
1243 dec_valid_block_count(sbi, inode, 1);
4d57b86d 1244 f2fs_invalidate_blocks(sbi, *blkaddr);
0a2aa8fb
JK
1245 } else {
1246 f2fs_update_data_blkaddr(&dn, *blkaddr);
36abef4e 1247 }
0a2aa8fb
JK
1248 f2fs_put_dnode(&dn);
1249 }
1250 return 0;
1251}
1252
1253static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1254 block_t *blkaddr, int *do_replace,
1255 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1256{
1257 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1258 pgoff_t i = 0;
1259 int ret;
36abef4e 1260
0a2aa8fb
JK
1261 while (i < len) {
1262 if (blkaddr[i] == NULL_ADDR && !full) {
1263 i++;
1264 continue;
6e2c64ad 1265 }
b4ace337 1266
0a2aa8fb
JK
1267 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1268 struct dnode_of_data dn;
1269 struct node_info ni;
1270 size_t new_size;
1271 pgoff_t ilen;
b4ace337 1272
0a2aa8fb 1273 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
4d57b86d 1274 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
0a2aa8fb
JK
1275 if (ret)
1276 return ret;
b4ace337 1277
a9419b63 1278 ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
7735730d
CY
1279 if (ret) {
1280 f2fs_put_dnode(&dn);
1281 return ret;
1282 }
1283
0a2aa8fb
JK
1284 ilen = min((pgoff_t)
1285 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1286 dn.ofs_in_node, len - i);
1287 do {
a2ced1ce 1288 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
4d57b86d 1289 f2fs_truncate_data_blocks_range(&dn, 1);
0a2aa8fb
JK
1290
1291 if (do_replace[i]) {
1292 f2fs_i_blocks_write(src_inode,
0abd675e 1293 1, false, false);
0a2aa8fb 1294 f2fs_i_blocks_write(dst_inode,
0abd675e 1295 1, true, false);
0a2aa8fb
JK
1296 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1297 blkaddr[i], ni.version, true, false);
1298
1299 do_replace[i] = 0;
1300 }
1301 dn.ofs_in_node++;
1302 i++;
1f0d5c91 1303 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
0a2aa8fb
JK
1304 if (dst_inode->i_size < new_size)
1305 f2fs_i_size_write(dst_inode, new_size);
e87f7329 1306 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
6e2c64ad 1307
0a2aa8fb
JK
1308 f2fs_put_dnode(&dn);
1309 } else {
1310 struct page *psrc, *pdst;
1311
4d57b86d
CY
1312 psrc = f2fs_get_lock_data_page(src_inode,
1313 src + i, true);
0a2aa8fb
JK
1314 if (IS_ERR(psrc))
1315 return PTR_ERR(psrc);
4d57b86d 1316 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
0a2aa8fb
JK
1317 true);
1318 if (IS_ERR(pdst)) {
1319 f2fs_put_page(psrc, 1);
1320 return PTR_ERR(pdst);
1321 }
1dd55358 1322 memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
0a2aa8fb
JK
1323 set_page_dirty(pdst);
1324 f2fs_put_page(pdst, 1);
6e2c64ad 1325 f2fs_put_page(psrc, 1);
b4ace337 1326
4d57b86d
CY
1327 ret = f2fs_truncate_hole(src_inode,
1328 src + i, src + i + 1);
0a2aa8fb
JK
1329 if (ret)
1330 return ret;
1331 i++;
1332 }
6e2c64ad
JK
1333 }
1334 return 0;
0a2aa8fb 1335}
b4ace337 1336
0a2aa8fb
JK
1337static int __exchange_data_block(struct inode *src_inode,
1338 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
363cad7f 1339 pgoff_t len, bool full)
0a2aa8fb
JK
1340{
1341 block_t *src_blkaddr;
1342 int *do_replace;
363cad7f 1343 pgoff_t olen;
0a2aa8fb
JK
1344 int ret;
1345
363cad7f 1346 while (len) {
d02a6e61 1347 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
0a2aa8fb 1348
628b3d14 1349 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
9d2a789c 1350 array_size(olen, sizeof(block_t)),
4f4460c0 1351 GFP_NOFS);
363cad7f
JK
1352 if (!src_blkaddr)
1353 return -ENOMEM;
0a2aa8fb 1354
628b3d14 1355 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
9d2a789c 1356 array_size(olen, sizeof(int)),
4f4460c0 1357 GFP_NOFS);
363cad7f
JK
1358 if (!do_replace) {
1359 kvfree(src_blkaddr);
1360 return -ENOMEM;
1361 }
0a2aa8fb 1362
363cad7f
JK
1363 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1364 do_replace, src, olen);
1365 if (ret)
1366 goto roll_back;
0a2aa8fb 1367
363cad7f
JK
1368 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1369 do_replace, src, dst, olen, full);
1370 if (ret)
1371 goto roll_back;
1372
1373 src += olen;
1374 dst += olen;
1375 len -= olen;
1376
1377 kvfree(src_blkaddr);
1378 kvfree(do_replace);
1379 }
0a2aa8fb
JK
1380 return 0;
1381
1382roll_back:
9fd62605 1383 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
0a2aa8fb
JK
1384 kvfree(src_blkaddr);
1385 kvfree(do_replace);
6e2c64ad
JK
1386 return ret;
1387}
b4ace337 1388
6f8d4455 1389static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
6e2c64ad
JK
1390{
1391 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
f91108b8 1392 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
6f8d4455
JK
1393 pgoff_t start = offset >> PAGE_SHIFT;
1394 pgoff_t end = (offset + len) >> PAGE_SHIFT;
0a2aa8fb 1395 int ret;
6e2c64ad 1396
0a2aa8fb 1397 f2fs_balance_fs(sbi, true);
5f281fab 1398
6f8d4455 1399 /* avoid gc operation during block exchange */
e4544b63 1400 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
edc6d01b 1401 filemap_invalidate_lock(inode->i_mapping);
5f281fab 1402
6f8d4455
JK
1403 f2fs_lock_op(sbi);
1404 f2fs_drop_extent_tree(inode);
1405 truncate_pagecache(inode, offset);
0a2aa8fb
JK
1406 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1407 f2fs_unlock_op(sbi);
6f8d4455 1408
edc6d01b 1409 filemap_invalidate_unlock(inode->i_mapping);
e4544b63 1410 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
b4ace337
CY
1411 return ret;
1412}
1413
1414static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1415{
b4ace337
CY
1416 loff_t new_size;
1417 int ret;
1418
b4ace337
CY
1419 if (offset + len >= i_size_read(inode))
1420 return -EINVAL;
1421
1422 /* collapse range should be aligned to block size of f2fs. */
1423 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1424 return -EINVAL;
1425
b9d777b8
JK
1426 ret = f2fs_convert_inline_inode(inode);
1427 if (ret)
1428 return ret;
97a7b2c2 1429
b4ace337
CY
1430 /* write out all dirty pages from offset */
1431 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1432 if (ret)
6f8d4455 1433 return ret;
b4ace337 1434
6f8d4455 1435 ret = f2fs_do_collapse(inode, offset, len);
b4ace337 1436 if (ret)
6f8d4455 1437 return ret;
b4ace337 1438
6e2c64ad 1439 /* write out all moved pages, if possible */
edc6d01b 1440 filemap_invalidate_lock(inode->i_mapping);
6e2c64ad
JK
1441 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1442 truncate_pagecache(inode, offset);
1443
b4ace337 1444 new_size = i_size_read(inode) - len;
c42d28ce 1445 ret = f2fs_truncate_blocks(inode, new_size, true);
edc6d01b 1446 filemap_invalidate_unlock(inode->i_mapping);
b4ace337 1447 if (!ret)
fc9581c8 1448 f2fs_i_size_write(inode, new_size);
b4ace337
CY
1449 return ret;
1450}
1451
6e961949
CY
1452static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1453 pgoff_t end)
1454{
1455 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1456 pgoff_t index = start;
1457 unsigned int ofs_in_node = dn->ofs_in_node;
1458 blkcnt_t count = 0;
1459 int ret;
1460
1461 for (; index < end; index++, dn->ofs_in_node++) {
a2ced1ce 1462 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
6e961949
CY
1463 count++;
1464 }
1465
1466 dn->ofs_in_node = ofs_in_node;
4d57b86d 1467 ret = f2fs_reserve_new_blocks(dn, count);
6e961949
CY
1468 if (ret)
1469 return ret;
1470
1471 dn->ofs_in_node = ofs_in_node;
1472 for (index = start; index < end; index++, dn->ofs_in_node++) {
a2ced1ce 1473 dn->data_blkaddr = f2fs_data_blkaddr(dn);
6e961949 1474 /*
4d57b86d 1475 * f2fs_reserve_new_blocks will not guarantee entire block
6e961949
CY
1476 * allocation.
1477 */
1478 if (dn->data_blkaddr == NULL_ADDR) {
1479 ret = -ENOSPC;
1480 break;
1481 }
25f82362
CY
1482
1483 if (dn->data_blkaddr == NEW_ADDR)
1484 continue;
1485
1486 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1487 DATA_GENERIC_ENHANCE)) {
1488 ret = -EFSCORRUPTED;
95fa90c9 1489 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
25f82362 1490 break;
6e961949 1491 }
25f82362
CY
1492
1493 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1494 dn->data_blkaddr = NEW_ADDR;
1495 f2fs_set_data_blkaddr(dn);
6e961949
CY
1496 }
1497
e7547dac 1498 f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
6e961949
CY
1499
1500 return ret;
1501}
1502
75cd4e09
CY
1503static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1504 int mode)
1505{
1506 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1507 struct address_space *mapping = inode->i_mapping;
1508 pgoff_t index, pg_start, pg_end;
1509 loff_t new_size = i_size_read(inode);
1510 loff_t off_start, off_end;
1511 int ret = 0;
1512
75cd4e09
CY
1513 ret = inode_newsize_ok(inode, (len + offset));
1514 if (ret)
1515 return ret;
1516
b9d777b8
JK
1517 ret = f2fs_convert_inline_inode(inode);
1518 if (ret)
1519 return ret;
75cd4e09
CY
1520
1521 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1522 if (ret)
6f8d4455 1523 return ret;
75cd4e09 1524
09cbfeaf
KS
1525 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1526 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
75cd4e09 1527
09cbfeaf
KS
1528 off_start = offset & (PAGE_SIZE - 1);
1529 off_end = (offset + len) & (PAGE_SIZE - 1);
75cd4e09
CY
1530
1531 if (pg_start == pg_end) {
6394328a
CY
1532 ret = fill_zero(inode, pg_start, off_start,
1533 off_end - off_start);
1534 if (ret)
6f8d4455 1535 return ret;
6394328a 1536
75cd4e09
CY
1537 new_size = max_t(loff_t, new_size, offset + len);
1538 } else {
1539 if (off_start) {
6394328a 1540 ret = fill_zero(inode, pg_start++, off_start,
09cbfeaf 1541 PAGE_SIZE - off_start);
6394328a 1542 if (ret)
6f8d4455 1543 return ret;
6394328a 1544
75cd4e09 1545 new_size = max_t(loff_t, new_size,
09cbfeaf 1546 (loff_t)pg_start << PAGE_SHIFT);
75cd4e09
CY
1547 }
1548
6e961949 1549 for (index = pg_start; index < pg_end;) {
75cd4e09 1550 struct dnode_of_data dn;
6e961949
CY
1551 unsigned int end_offset;
1552 pgoff_t end;
75cd4e09 1553
e4544b63 1554 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
edc6d01b 1555 filemap_invalidate_lock(mapping);
c7079853
CY
1556
1557 truncate_pagecache_range(inode,
1558 (loff_t)index << PAGE_SHIFT,
1559 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1560
75cd4e09
CY
1561 f2fs_lock_op(sbi);
1562
6e961949 1563 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1564 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
75cd4e09
CY
1565 if (ret) {
1566 f2fs_unlock_op(sbi);
edc6d01b 1567 filemap_invalidate_unlock(mapping);
e4544b63 1568 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
75cd4e09
CY
1569 goto out;
1570 }
1571
6e961949
CY
1572 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1573 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1574
1575 ret = f2fs_do_zero_range(&dn, index, end);
75cd4e09 1576 f2fs_put_dnode(&dn);
c7079853 1577
75cd4e09 1578 f2fs_unlock_op(sbi);
edc6d01b 1579 filemap_invalidate_unlock(mapping);
e4544b63 1580 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
9434fcde
CY
1581
1582 f2fs_balance_fs(sbi, dn.node_changed);
1583
6e961949
CY
1584 if (ret)
1585 goto out;
75cd4e09 1586
6e961949 1587 index = end;
75cd4e09 1588 new_size = max_t(loff_t, new_size,
6e961949 1589 (loff_t)index << PAGE_SHIFT);
75cd4e09
CY
1590 }
1591
1592 if (off_end) {
6394328a
CY
1593 ret = fill_zero(inode, pg_end, 0, off_end);
1594 if (ret)
1595 goto out;
1596
75cd4e09
CY
1597 new_size = max_t(loff_t, new_size, offset + len);
1598 }
1599 }
1600
1601out:
17cd07ae
CY
1602 if (new_size > i_size_read(inode)) {
1603 if (mode & FALLOC_FL_KEEP_SIZE)
1604 file_set_keep_isize(inode);
1605 else
1606 f2fs_i_size_write(inode, new_size);
1607 }
75cd4e09
CY
1608 return ret;
1609}
1610
f62185d0
CY
1611static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1612{
1613 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
edc6d01b 1614 struct address_space *mapping = inode->i_mapping;
0a2aa8fb 1615 pgoff_t nr, pg_start, pg_end, delta, idx;
f62185d0 1616 loff_t new_size;
6e2c64ad 1617 int ret = 0;
f62185d0 1618
f62185d0 1619 new_size = i_size_read(inode) + len;
46e82fb1
KM
1620 ret = inode_newsize_ok(inode, new_size);
1621 if (ret)
1622 return ret;
f62185d0
CY
1623
1624 if (offset >= i_size_read(inode))
1625 return -EINVAL;
1626
1627 /* insert range should be aligned to block size of f2fs. */
1628 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1629 return -EINVAL;
1630
b9d777b8
JK
1631 ret = f2fs_convert_inline_inode(inode);
1632 if (ret)
1633 return ret;
97a7b2c2 1634
2c4db1a6 1635 f2fs_balance_fs(sbi, true);
2a340760 1636
edc6d01b 1637 filemap_invalidate_lock(mapping);
c42d28ce 1638 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
edc6d01b 1639 filemap_invalidate_unlock(mapping);
f62185d0 1640 if (ret)
6f8d4455 1641 return ret;
f62185d0
CY
1642
1643 /* write out all dirty pages from offset */
edc6d01b 1644 ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
f62185d0 1645 if (ret)
6f8d4455 1646 return ret;
f62185d0 1647
09cbfeaf
KS
1648 pg_start = offset >> PAGE_SHIFT;
1649 pg_end = (offset + len) >> PAGE_SHIFT;
f62185d0 1650 delta = pg_end - pg_start;
f91108b8 1651 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
0a2aa8fb 1652
6f8d4455 1653 /* avoid gc operation during block exchange */
e4544b63 1654 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
edc6d01b 1655 filemap_invalidate_lock(mapping);
6f8d4455
JK
1656 truncate_pagecache(inode, offset);
1657
0a2aa8fb
JK
1658 while (!ret && idx > pg_start) {
1659 nr = idx - pg_start;
1660 if (nr > delta)
1661 nr = delta;
1662 idx -= nr;
f62185d0 1663
f62185d0 1664 f2fs_lock_op(sbi);
5f281fab
JK
1665 f2fs_drop_extent_tree(inode);
1666
0a2aa8fb
JK
1667 ret = __exchange_data_block(inode, inode, idx,
1668 idx + delta, nr, false);
f62185d0
CY
1669 f2fs_unlock_op(sbi);
1670 }
edc6d01b 1671 filemap_invalidate_unlock(mapping);
e4544b63 1672 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f62185d0 1673
6e2c64ad 1674 /* write out all moved pages, if possible */
edc6d01b
JK
1675 filemap_invalidate_lock(mapping);
1676 filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
6e2c64ad 1677 truncate_pagecache(inode, offset);
edc6d01b 1678 filemap_invalidate_unlock(mapping);
6e2c64ad
JK
1679
1680 if (!ret)
fc9581c8 1681 f2fs_i_size_write(inode, new_size);
f62185d0
CY
1682 return ret;
1683}
1684
1cd75654 1685static int f2fs_expand_inode_data(struct inode *inode, loff_t offset,
fbfa2cc5
JK
1686 loff_t len, int mode)
1687{
4081363f 1688 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
d5097be5 1689 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
f9d6d059
CY
1690 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1691 .m_may_create = true };
d147ea4a
JK
1692 struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
1693 .init_gc_type = FG_GC,
1694 .should_migrate_blocks = false,
c81d5bae
JK
1695 .err_gc_skipped = true,
1696 .nr_free_secs = 0 };
88f2cfc5 1697 pgoff_t pg_start, pg_end;
39bee2e6 1698 loff_t new_size;
e12dd7bd 1699 loff_t off_end;
88f2cfc5 1700 block_t expanded = 0;
a7de6086 1701 int err;
fbfa2cc5 1702
a7de6086
JK
1703 err = inode_newsize_ok(inode, (len + offset));
1704 if (err)
1705 return err;
fbfa2cc5 1706
a7de6086
JK
1707 err = f2fs_convert_inline_inode(inode);
1708 if (err)
1709 return err;
9e09fc85 1710
2c4db1a6 1711 f2fs_balance_fs(sbi, true);
2a340760 1712
88f2cfc5 1713 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
e12dd7bd 1714 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
09cbfeaf 1715 off_end = (offset + len) & (PAGE_SIZE - 1);
fbfa2cc5 1716
88f2cfc5
CY
1717 map.m_lblk = pg_start;
1718 map.m_len = pg_end - pg_start;
e12dd7bd
JK
1719 if (off_end)
1720 map.m_len++;
ead43275 1721
f5a53edc
JK
1722 if (!map.m_len)
1723 return 0;
1724
1725 if (f2fs_is_pinned_file(inode)) {
074b5ea2 1726 block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
e1175f02 1727 block_t sec_len = roundup(map.m_len, sec_blks);
f5a53edc 1728
e1175f02 1729 map.m_len = sec_blks;
f5a53edc
JK
1730next_alloc:
1731 if (has_not_enough_free_secs(sbi, 0,
1732 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
e4544b63 1733 f2fs_down_write(&sbi->gc_lock);
d147ea4a 1734 err = f2fs_gc(sbi, &gc_control);
2e42b7f8 1735 if (err && err != -ENODATA)
f5a53edc
JK
1736 goto out_err;
1737 }
1738
e4544b63 1739 f2fs_down_write(&sbi->pin_sem);
fd612648
DJ
1740
1741 f2fs_lock_op(sbi);
509f1010 1742 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
fd612648
DJ
1743 f2fs_unlock_op(sbi);
1744
d0b9e42a 1745 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
cd8fc522 1746 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
d4dd19ec 1747 file_dont_truncate(inode);
d0b9e42a 1748
e4544b63 1749 f2fs_up_write(&sbi->pin_sem);
cad3836f 1750
88f2cfc5 1751 expanded += map.m_len;
e1175f02 1752 sec_len -= map.m_len;
f5a53edc 1753 map.m_lblk += map.m_len;
e1175f02 1754 if (!err && sec_len)
f5a53edc
JK
1755 goto next_alloc;
1756
88f2cfc5 1757 map.m_len = expanded;
f5a53edc 1758 } else {
cd8fc522 1759 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
88f2cfc5 1760 expanded = map.m_len;
f5a53edc
JK
1761 }
1762out_err:
a7de6086 1763 if (err) {
e12dd7bd 1764 pgoff_t last_off;
fbfa2cc5 1765
88f2cfc5 1766 if (!expanded)
a7de6086 1767 return err;
98397ff3 1768
88f2cfc5 1769 last_off = pg_start + expanded - 1;
e12dd7bd
JK
1770
1771 /* update new size to the failed position */
1061fd48 1772 new_size = (last_off == pg_end) ? offset + len :
e12dd7bd
JK
1773 (loff_t)(last_off + 1) << PAGE_SHIFT;
1774 } else {
1775 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
fbfa2cc5
JK
1776 }
1777
e8ed90a6
CY
1778 if (new_size > i_size_read(inode)) {
1779 if (mode & FALLOC_FL_KEEP_SIZE)
1780 file_set_keep_isize(inode);
1781 else
1782 f2fs_i_size_write(inode, new_size);
1783 }
fbfa2cc5 1784
a7de6086 1785 return err;
fbfa2cc5
JK
1786}
1787
1788static long f2fs_fallocate(struct file *file, int mode,
1789 loff_t offset, loff_t len)
1790{
6131ffaa 1791 struct inode *inode = file_inode(file);
587c0a42 1792 long ret = 0;
fbfa2cc5 1793
1f227a3e
JK
1794 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1795 return -EIO;
00e09c0b
CY
1796 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1797 return -ENOSPC;
4c8ff709
CY
1798 if (!f2fs_is_compress_backend_ready(inode))
1799 return -EOPNOTSUPP;
1f227a3e 1800
c998012b
CY
1801 /* f2fs only support ->fallocate for regular file */
1802 if (!S_ISREG(inode->i_mode))
1803 return -EINVAL;
1804
62230e0d 1805 if (IS_ENCRYPTED(inode) &&
f62185d0 1806 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
fcc85a4d
JK
1807 return -EOPNOTSUPP;
1808
5fed0be8
JK
1809 /*
1810 * Pinned file should not support partial trucation since the block
1811 * can be used by applications.
1812 */
1813 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
4c8ff709
CY
1814 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1815 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1816 return -EOPNOTSUPP;
1817
b4ace337 1818 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
f62185d0
CY
1819 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1820 FALLOC_FL_INSERT_RANGE))
fbfa2cc5
JK
1821 return -EOPNOTSUPP;
1822
5955102c 1823 inode_lock(inode);
3375f696 1824
958ed929
CY
1825 ret = file_modified(file);
1826 if (ret)
1827 goto out;
1828
587c0a42
TY
1829 if (mode & FALLOC_FL_PUNCH_HOLE) {
1830 if (offset >= inode->i_size)
1831 goto out;
1832
1cd75654 1833 ret = f2fs_punch_hole(inode, offset, len);
b4ace337
CY
1834 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1835 ret = f2fs_collapse_range(inode, offset, len);
75cd4e09
CY
1836 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1837 ret = f2fs_zero_range(inode, offset, len, mode);
f62185d0
CY
1838 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1839 ret = f2fs_insert_range(inode, offset, len);
b4ace337 1840 } else {
1cd75654 1841 ret = f2fs_expand_inode_data(inode, offset, len, mode);
b4ace337 1842 }
fbfa2cc5 1843
3af60a49 1844 if (!ret) {
078cd827 1845 inode->i_mtime = inode->i_ctime = current_time(inode);
7c45729a 1846 f2fs_mark_inode_dirty_sync(inode, false);
d0239e1b 1847 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3af60a49 1848 }
3375f696 1849
587c0a42 1850out:
5955102c 1851 inode_unlock(inode);
3375f696 1852
c01e2853 1853 trace_f2fs_fallocate(inode, mode, offset, len, ret);
fbfa2cc5
JK
1854 return ret;
1855}
1856
1e84371f
JK
1857static int f2fs_release_file(struct inode *inode, struct file *filp)
1858{
de5307e4
JK
1859 /*
1860 * f2fs_relase_file is called at every close calls. So we should
1861 * not drop any inmemory pages by close called by other process.
1862 */
1863 if (!(filp->f_mode & FMODE_WRITE) ||
1864 atomic_read(&inode->i_writecount) != 1)
1865 return 0;
1866
e53f8643 1867 f2fs_abort_atomic_write(inode, true);
1e84371f
JK
1868 return 0;
1869}
1870
7a10f017 1871static int f2fs_file_flush(struct file *file, fl_owner_t id)
fbfa2cc5 1872{
7a10f017
JK
1873 struct inode *inode = file_inode(file);
1874
1875 /*
1876 * If the process doing a transaction is crashed, we should do
1877 * roll-back. Otherwise, other reader/write can see corrupted database
1878 * until all the writers close its file. Since this should be done
1879 * before dropping file lock, it needs to do in ->flush.
1880 */
e53f8643 1881 if (F2FS_I(inode)->atomic_write_task == current)
3db1de0e 1882 f2fs_abort_atomic_write(inode, true);
7a10f017 1883 return 0;
fbfa2cc5
JK
1884}
1885
/*
 * Apply the inode-flag bits in @iflags (restricted to @mask) to @inode,
 * validating feature support and compression/casefold constraints first.
 * Returns 0 on success or a negative errno; on success the inode is marked
 * dirty and its ctime updated.
 */
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	/* mask can be shrunk by flags_valid selector */
	iflags &= mask;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	/* Toggling casefold requires sb support and an empty directory. */
	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	/* COMPR and NOCOMP are mutually exclusive and need sb support. */
	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			/* clearing COMPR: must be able to decompress in place */
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		} else {
			/* try to convert inline_data to support compression */
			int err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			/* refuse enabling compression on a file with data */
			if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
				return -EINVAL;
			if (set_compress_context(inode))
				return -EOPNOTSUPP;
		}
	}

	/* merge new bits into the unmasked remainder of i_flags */
	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
1944
/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

/* one-to-one mapping between on-disk F2FS_*_FL bits and generic FS_*_FL */
static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

/* flags user space may read via FS_IOC_GETFLAGS */
#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

/* subset of the gettable flags user space may change via FS_IOC_SETFLAGS */
#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)
2002
2003/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
2004static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
2005{
2006 u32 fsflags = 0;
2007 int i;
2008
2009 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2010 if (iflags & f2fs_fsflags_map[i].iflag)
2011 fsflags |= f2fs_fsflags_map[i].fsflag;
2012
2013 return fsflags;
2014}
2015
2016/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
2017static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
2018{
2019 u32 iflags = 0;
2020 int i;
2021
2022 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
2023 if (fsflags & f2fs_fsflags_map[i].fsflag)
2024 iflags |= f2fs_fsflags_map[i].iflag;
2025
2026 return iflags;
2027}
2028
d49f3e89
CY
2029static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2030{
2031 struct inode *inode = file_inode(filp);
2032
2033 return put_user(inode->i_generation, (int __user *)arg);
2034}
2035
/*
 * Begin an atomic-write session on a regular file: flush existing dirty
 * data, create a COW inode (tmpfile under the parent directory) that will
 * absorb the atomic writes, and mark both inodes accordingly.  When
 * @truncate is true the file is logically emptied first (replace mode).
 * Returns 0 on success or a negative errno.
 */
static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
{
	struct inode *inode = file_inode(filp);
	struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inode *pinode;
	loff_t isize;
	int ret;

	if (!inode_owner_or_capable(mnt_userns, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* atomic writes are buffered-IO only */
	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EINVAL;
		goto out;
	}

	/* already in a session: nothing to do (ret == 0 here) */
	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	/* block GC from moving this inode's blocks during setup */
	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		goto out;
	}

	/* Create a COW inode for atomic write */
	pinode = f2fs_iget(inode->i_sb, fi->i_pino);
	if (IS_ERR(pinode)) {
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		ret = PTR_ERR(pinode);
		goto out;
	}

	ret = f2fs_get_tmpfile(mnt_userns, pinode, &fi->cow_inode);
	iput(pinode);
	if (ret) {
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		goto out;
	}

	f2fs_write_inode(inode, NULL);

	stat_inc_atomic_inode(inode);

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(fi->cow_inode, FI_COW_FILE);
	clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);

	/* remember pre-session size so an abort can restore it */
	isize = i_size_read(inode);
	fi->original_i_size = isize;
	if (truncate) {
		/* replace mode: drop all existing data before writing */
		set_inode_flag(inode, FI_ATOMIC_REPLACE);
		truncate_inode_pages_final(inode->i_mapping);
		f2fs_i_size_write(inode, 0);
		isize = 0;
	}
	f2fs_i_size_write(fi->cow_inode, isize);

	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_update_time(sbi, REQ_TIME);
	fi->atomic_write_task = current;
	stat_update_max_atomic_write(inode);
	fi->atomic_write_cnt = 0;
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2132
/*
 * Commit a pending atomic-write session: merge the COW data, fsync the
 * result, then tear the session down (abort path doubles as cleanup on a
 * failed commit).  On a non-atomic file it degrades to a plain fsync.
 */
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
	int ret;

	if (!inode_owner_or_capable(mnt_userns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_atomic_write(inode);
		if (!ret)
			ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);

		/* always ends the session; 'ret' says whether it's a clean end */
		f2fs_abort_atomic_write(inode, ret);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2164
/*
 * Abort an in-flight atomic-write session, discarding the COW data and
 * restoring the file to its pre-session state.  Safe to call when no
 * session is active (abort helper is expected to handle that).
 */
static int f2fs_ioc_abort_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct user_namespace *mnt_userns = file_mnt_user_ns(filp);
	int ret;

	if (!inode_owner_or_capable(mnt_userns, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_abort_atomic_write(inode, true);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
2188
/*
 * F2FS_IOC_SHUTDOWN: force the filesystem into a shutdown state with a
 * caller-chosen amount of prior syncing (see F2FS_GOING_DOWN_* modes).
 * FULLSYNC freezes the block device instead of taking mnt write access,
 * hence the asymmetric mnt_want/drop_write_file calls.
 */
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			/* read-only mount: still honor the shutdown request */
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false,
						STOP_CP_REASON_SHUTDOWN);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		/* freeze flushes everything; thaw after stopping checkpoints */
		ret = freeze_bdev(sb->s_bdev);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	/* stop background services; discards are pointless after shutdown */
	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
2270
/*
 * FITRIM ioctl: discard unused blocks in the user-supplied range.  The
 * effective minimum extent length is clamped to the device's discard
 * granularity, and the (possibly updated) range is copied back out.
 */
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* never trim extents smaller than the device can discard */
	range.minlen = max((unsigned int)range.minlen,
			   bdev_discard_granularity(sb->s_bdev));
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	/* report the amount actually trimmed back to user space */
	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
2305
f424f664
JK
2306static bool uuid_is_nonzero(__u8 u[16])
2307{
2308 int i;
2309
2310 for (i = 0; i < 16; i++)
2311 if (u[i])
2312 return true;
2313 return false;
2314}
2315
/*
 * FS_IOC_SET_ENCRYPTION_POLICY: delegate to fscrypt after checking that
 * the superblock has the encrypt feature enabled.
 */
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
2327
2328static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2329{
7beb01f7 2330 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
ead710b7 2331 return -EOPNOTSUPP;
db717d8e 2332 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
f424f664
JK
2333}
2334
/*
 * F2FS_IOC_GET_ENCRYPTION_PWSALT: return the per-filesystem password salt,
 * generating and persisting one (via f2fs_commit_super) on first use.  The
 * salt is snapshotted under sb_lock and copied to user space after the
 * lock is dropped.
 */
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	u8 encrypt_pw_salt[16];
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	f2fs_down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	/* snapshot under the lock; copy_to_user happens outside it */
	memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16);
out_err:
	f2fs_up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);

	if (!err && copy_to_user((__u8 __user *)arg, encrypt_pw_salt, 16))
		err = -EFAULT;

	return err;
}
2374
8ce589c7
EB
2375static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2376 unsigned long arg)
2377{
2378 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2379 return -EOPNOTSUPP;
2380
2381 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2382}
2383
2384static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2385{
2386 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2387 return -EOPNOTSUPP;
2388
2389 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2390}
2391
2392static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2393{
2394 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2395 return -EOPNOTSUPP;
2396
2397 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2398}
2399
2400static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2401 unsigned long arg)
2402{
2403 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2404 return -EOPNOTSUPP;
2405
2406 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2407}
2408
2409static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2410 unsigned long arg)
2411{
2412 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2413 return -EOPNOTSUPP;
2414
2415 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2416}
2417
ee446e1a
EB
2418static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2419{
2420 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2421 return -EOPNOTSUPP;
2422
2423 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2424}
2425
/*
 * F2FS_IOC_GARBAGE_COLLECT: run one garbage-collection pass.  A non-zero
 * @sync argument requests blocking foreground GC; zero tries background GC
 * without waiting for the gc_lock (-EBUSY if contended).
 */
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.no_bg_gc = false,
			.should_migrate_blocks = false,
			.nr_free_secs = 0 };
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		/* async mode: don't wait on the lock */
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	/* f2fs_gc() releases gc_lock */
	gc_control.init_gc_type = sync ? FG_GC : BG_GC;
	gc_control.err_gc_skipped = sync;
	ret = f2fs_gc(sbi, &gc_control);
out:
	mnt_drop_write_file(filp);
	return ret;
}
2466
/*
 * Garbage-collect the block-address range described by @range, one section
 * per f2fs_gc() call, looping until the range is covered or an error
 * occurs.  @range->start is advanced in place as sections complete.
 */
static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	struct f2fs_gc_control gc_control = {
			.init_gc_type = range->sync ? FG_GC : BG_GC,
			.no_bg_gc = false,
			.should_migrate_blocks = false,
			.err_gc_skipped = range->sync,
			.nr_free_secs = 0 };
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	/* reject wrap-around and addresses outside the main area */
	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		/* async mode: don't block on the gc_lock */
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	/* f2fs_gc() releases gc_lock */
	gc_control.victim_segno = GET_SEGNO(sbi, range->start);
	ret = f2fs_gc(sbi, &gc_control);
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	/* advance by one section's worth of usable blocks */
	range->start += CAP_BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
2517
/*
 * F2FS_IOC_GARBAGE_COLLECT_RANGE: copy the range descriptor from user
 * space and hand it to __f2fs_ioc_gc_range().
 */
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}
2527
/*
 * F2FS_IOC_WRITE_CHECKPOINT: trigger a synchronous checkpoint.  Refused
 * when checkpointing is disabled (checkpoint=disable mount state).
 */
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
2554
/*
 * Defragment the byte range described by @range.  First pass scans the
 * block mapping to decide whether the range is fragmented at all; second
 * pass redirties the affected pages (up to one segment at a time) with
 * FI_SKIP_WRITES set so writeback reallocates them contiguously.  On
 * success @range->len is rewritten to the number of bytes moved.
 */
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* if in-place-update policy is enabled, don't waste time here */
	set_inode_flag(inode, FI_OPU_WRITE);
	if (f2fs_should_update_inplace(inode, NULL)) {
		err = -EINVAL;
		goto out;
	}

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			/* hole: jump to the next mapped offset */
			map.m_lblk = next_pgofs;
			continue;
		}

		/* discontinuity between consecutive extents => fragmented */
		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_SKIP_WRITES);

		/* redirty at most one segment's worth of pages per round */
		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			set_page_private_gcing(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_SKIP_WRITES);

		/* flush the batch so blocks get reallocated contiguously */
		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_SKIP_WRITES);
out:
	clear_inode_flag(inode, FI_OPU_WRITE);
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
2707
/*
 * F2FS_IOC_DEFRAGMENT: validate the user-supplied range (block-aligned,
 * inside max file size, regular non-atomic file) and run
 * f2fs_defragment_range(), copying the updated range back to user space.
 */
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					max_file_blocks(inode)))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	/* range.len now holds the number of bytes actually moved */
	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
2753
/*
 * Exchange @len bytes of blocks from @file_in at @pos_in into @file_out at
 * @pos_out (same filesystem only, block-aligned, no encrypted files).
 * Locks src before dst (trylock on dst to avoid ABBA), flushes both ranges,
 * then swaps blocks under f2fs_lock_op() and fixes up dst's i_size.
 */
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		/* overlapping in-file move is not supported */
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		/* trylock dst to avoid lock-order inversion with other movers */
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	/* extend to block boundary when the range ends at EOF */
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	/* keep GC away from both files while blocks are exchanged */
	f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
2868
/*
 * Resolve @range->dst_fd, check read/write modes on both files, and invoke
 * f2fs_move_file_range().  fdget/fdput bracket all exit paths.
 */
static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);

	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}
2900
/*
 * F2FS_IOC_MOVE_RANGE: copy the descriptor from user space and delegate to
 * __f2fs_ioc_move_range().
 */
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_move_range(filp, &range);
}
2910
/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by foreground-GCing a window of segments belonging to
 * @range.dev_num, resuming from last_victim[FLUSH_DEVICE] and pinning the
 * other victim cursors past the window so GC stays inside it.
 */
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	struct f2fs_gc_control gc_control = {
			.init_gc_type = FG_GC,
			.should_migrate_blocks = true,
			.err_gc_skipped = true,
			.nr_free_secs = 0 };
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* only meaningful on multi-device, 1-segment-per-section layouts */
	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	/* resume from the previous flush position if it's still in range */
	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		/* park the other victim cursors beyond the window */
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;

		/* f2fs_gc() releases gc_lock */
		gc_control.victim_segno = start_segno;
		ret = f2fs_gc(sbi, &gc_control);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
2980
e65ef207
JK
2981static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2982{
2983 struct inode *inode = file_inode(filp);
2984 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2985
2986 /* Must validate to set it with SQLite behavior in Android. */
2987 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2988
2989 return put_user(sb_feature, (u32 __user *)arg);
2990}
e066b83c 2991
2c1d0305 2992#ifdef CONFIG_QUOTA
78130819
CY
2993int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2994{
2995 struct dquot *transfer_to[MAXQUOTAS] = {};
2996 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2997 struct super_block *sb = sbi->sb;
2998 int err = 0;
2999
3000 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3001 if (!IS_ERR(transfer_to[PRJQUOTA])) {
3002 err = __dquot_transfer(inode, transfer_to);
3003 if (err)
3004 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3005 dqput(transfer_to[PRJQUOTA]);
3006 }
3007 return err;
3008}
3009
/*
 * Set the project id of @inode to @projid, transferring quota usage to
 * the new project. Returns 0 on success or a negative errno.
 */
static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		/* Without the feature, only the default project id is valid. */
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	/* i_projid lives in the extra attribute area of the on-disk inode. */
	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, fi->i_projid))
		return 0;	/* already set; nothing to do */

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		return -EOVERFLOW;

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	fi->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
3057#else
78130819
CY
/* CONFIG_QUOTA disabled: project quota transfer is a no-op. */
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}
3062
9b1bb01c 3063static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
2c1d0305
CY
3064{
3065 if (projid != F2FS_DEF_PROJID)
3066 return -EOPNOTSUPP;
3067 return 0;
3068}
3069#endif
3070
9b1bb01c 3071int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
2c1d0305 3072{
9b1bb01c 3073 struct inode *inode = d_inode(dentry);
2c1d0305 3074 struct f2fs_inode_info *fi = F2FS_I(inode);
9b1bb01c 3075 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2c1d0305 3076
9b1bb01c
MS
3077 if (IS_ENCRYPTED(inode))
3078 fsflags |= FS_ENCRYPT_FL;
3079 if (IS_VERITY(inode))
3080 fsflags |= FS_VERITY_FL;
3081 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3082 fsflags |= FS_INLINE_DATA_FL;
3083 if (is_inode_flag_set(inode, FI_PIN_FILE))
3084 fsflags |= FS_NOCOW_FL;
3085
3086 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
2c1d0305 3087
7beb01f7 3088 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
6fc93c4e 3089 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
2c1d0305 3090
c8e92757
WS
3091 return 0;
3092}
3093
9b1bb01c
MS
3094int f2fs_fileattr_set(struct user_namespace *mnt_userns,
3095 struct dentry *dentry, struct fileattr *fa)
2c1d0305 3096{
9b1bb01c
MS
3097 struct inode *inode = d_inode(dentry);
3098 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
36098557 3099 u32 iflags;
2c1d0305
CY
3100 int err;
3101
9b1bb01c
MS
3102 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3103 return -EIO;
3104 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3105 return -ENOSPC;
3106 if (fsflags & ~F2FS_GETTABLE_FS_FL)
2c1d0305 3107 return -EOPNOTSUPP;
9b1bb01c
MS
3108 fsflags &= F2FS_SETTABLE_FS_FL;
3109 if (!fa->flags_valid)
3110 mask &= FS_COMMON_FL;
2c1d0305 3111
9b1bb01c 3112 iflags = f2fs_fsflags_to_iflags(fsflags);
36098557 3113 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2c1d0305
CY
3114 return -EOPNOTSUPP;
3115
9b1bb01c
MS
3116 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3117 if (!err)
3118 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
2c1d0305 3119
c8e92757 3120 return err;
2c1d0305 3121}
e066b83c 3122
1ad71a27
JK
3123int f2fs_pin_file_control(struct inode *inode, bool inc)
3124{
3125 struct f2fs_inode_info *fi = F2FS_I(inode);
3126 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3127
3128 /* Use i_gc_failures for normal file as a risk signal. */
3129 if (inc)
2ef79ecb
CY
3130 f2fs_i_gc_failures_write(inode,
3131 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
1ad71a27 3132
2ef79ecb 3133 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
dcbb4c10
JP
3134 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3135 __func__, inode->i_ino,
3136 fi->i_gc_failures[GC_FAILURE_PIN]);
1ad71a27
JK
3137 clear_inode_flag(inode, FI_PIN_FILE);
3138 return -EAGAIN;
3139 }
3140 return 0;
3141}
3142
/*
 * F2FS_IOC_SET_PIN_FILE: pin (pin != 0) or unpin a regular file so GC
 * will not migrate its blocks. On success when pinning, returns the
 * current GC-failure count for the file; 0 when unpinning.
 */
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!pin) {
		/* Unpin: clear the flag and reset the GC failure counter. */
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	/* Pinning requires in-place updates to be usable for this inode. */
	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	/* Inline data cannot be pinned in place; move it to a data block. */
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	/* Compressed layout is incompatible with pinning. */
	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
3198
3199static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3200{
3201 struct inode *inode = file_inode(filp);
3202 __u32 pin = 0;
3203
3204 if (is_inode_flag_set(inode, FI_PIN_FILE))
2ef79ecb 3205 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
1ad71a27
JK
3206 return put_user(pin, (u32 __user *)arg);
3207}
3208
c4020b2d
CY
/*
 * Walk the whole file and populate the extent cache by mapping every
 * range with F2FS_GET_BLOCK_PRECACHE. Returns 0 on success or the first
 * mapping error.
 */
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;	/* advanced by each mapping */
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;		/* read-only lookup */
	end = max_file_blocks(inode);

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		/* Exclude GC while mapping so block addresses stay stable. */
		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return 0;
}
3241
/* F2FS_IOC_PRECACHE_EXTENTS: thin ioctl wrapper over f2fs_precache_extents(). */
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return f2fs_precache_extents(inode);
}
3246
04f0b2ea
QS
3247static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3248{
3249 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3250 __u64 block_count;
04f0b2ea
QS
3251
3252 if (!capable(CAP_SYS_ADMIN))
3253 return -EPERM;
3254
3255 if (f2fs_readonly(sbi->sb))
3256 return -EROFS;
3257
3258 if (copy_from_user(&block_count, (void __user *)arg,
3259 sizeof(block_count)))
3260 return -EFAULT;
3261
b4b10061 3262 return f2fs_resize_fs(sbi, block_count);
04f0b2ea
QS
3263}
3264
95ae251f
EB
3265static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3266{
3267 struct inode *inode = file_inode(filp);
3268
3269 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3270
3271 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3272 f2fs_warn(F2FS_I_SB(inode),
833dcd35 3273 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
95ae251f
EB
3274 inode->i_ino);
3275 return -EOPNOTSUPP;
3276 }
3277
3278 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3279}
3280
3281static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3282{
3283 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3284 return -EOPNOTSUPP;
3285
3286 return fsverity_ioctl_measure(filp, (void __user *)arg);
3287}
3288
e17fe657
EB
3289static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3290{
3291 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3292 return -EOPNOTSUPP;
3293
3294 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3295}
3296
3357af8f 3297static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
4507847c
CY
3298{
3299 struct inode *inode = file_inode(filp);
3300 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3301 char *vbuf;
3302 int count;
3303 int err = 0;
3304
3305 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3306 if (!vbuf)
3307 return -ENOMEM;
3308
e4544b63 3309 f2fs_down_read(&sbi->sb_lock);
4507847c
CY
3310 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3311 ARRAY_SIZE(sbi->raw_super->volume_name),
3312 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
e4544b63 3313 f2fs_up_read(&sbi->sb_lock);
4507847c
CY
3314
3315 if (copy_to_user((char __user *)arg, vbuf,
3316 min(FSLABEL_MAX, count)))
3317 err = -EFAULT;
3318
c8eb7024 3319 kfree(vbuf);
4507847c
CY
3320 return err;
3321}
3322
/*
 * FS_IOC_SETFSLABEL: replace the volume name in the raw superblock with
 * a UTF-16LE conversion of the user-supplied label and commit the
 * superblock to disk. Privileged operation.
 */
static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Copies at most FSLABEL_MAX bytes and NUL-terminates. */
	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	/* sb_lock serializes concurrent readers/writers of raw_super. */
	f2fs_down_write(&sbi->sb_lock);

	/* Clear the old name fully so short labels leave no stale tail. */
	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	f2fs_up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
3358
439dfb10
CY
3359static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3360{
3361 struct inode *inode = file_inode(filp);
3362 __u64 blocks;
3363
3364 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3365 return -EOPNOTSUPP;
3366
3367 if (!f2fs_compressed_file(inode))
3368 return -EINVAL;
3369
c2759eba 3370 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
439dfb10
CY
3371 return put_user(blocks, (u64 __user *)arg);
3372}
3373
ef8d563f
CY
/*
 * Release the reserved (NEW_ADDR) blocks of the compressed clusters
 * covered by @count entries starting at dn->ofs_in_node. @count is
 * expected to be cluster-aligned by the caller. Returns the number of
 * blocks released, or -EFSCORRUPTED on an invalid block address.
 * Note: advances dn->ofs_in_node as it walks.
 */
static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	/* First pass: sanity-check every valid address before mutating. */
	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))) {
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			return -EFSCORRUPTED;
		}
	}

	/* Second pass: walk cluster by cluster. */
	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				/* Not a compressed cluster: skip it whole. */
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			/* Drop the reserved-but-unwritten slot. */
			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}
3429
/*
 * F2FS_IOC_RELEASE_COMPRESS_BLOCKS: mark the file immutable for writes
 * (FI_COMPRESS_RELEASED) and return its reserved compression blocks to
 * the free space pool. On success the number of released blocks is
 * copied back to user space.
 */
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int ret;
	int writecount;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	/* No other writer may hold the file open while we release blocks. */
	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;	/* already released */
		goto out;
	}

	/* Flush dirty data so the on-disk block layout is final. */
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_COMPRESS_RELEASED);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;	/* nothing saved by compression */

	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				/* hole: jump to the next allocated node page */
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	filemap_invalidate_unlock(inode->i_mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		/* Partial release leaves block accounting inconsistent. */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
3537
c75488fb
CY
/*
 * Re-reserve (as NEW_ADDR) the block slots that were previously released
 * from the compressed clusters covered by @count entries starting at
 * dn->ofs_in_node. Returns the number of blocks reserved, -ENOSPC if
 * the full reservation could not be granted, or another negative errno.
 * Note: advances dn->ofs_in_node as it walks.
 */
static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	/* First pass: sanity-check every valid address before mutating. */
	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))) {
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			return -EFSCORRUPTED;
		}
	}

	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				/* Not a compressed cluster: skip it whole. */
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			/* Re-reserve the slot that release dropped. */
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;
		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		/* inc_valid_block_count() may grant fewer blocks than asked. */
		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}
3600
3601static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3602{
3603 struct inode *inode = file_inode(filp);
3604 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3605 pgoff_t page_idx = 0, last_idx;
3606 unsigned int reserved_blocks = 0;
3607 int ret;
3608
3609 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3610 return -EOPNOTSUPP;
3611
3612 if (!f2fs_compressed_file(inode))
3613 return -EINVAL;
3614
3615 if (f2fs_readonly(sbi->sb))
3616 return -EROFS;
3617
3618 ret = mnt_want_write_file(filp);
3619 if (ret)
3620 return ret;
3621
c2759eba 3622 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
c75488fb
CY
3623 goto out;
3624
3625 f2fs_balance_fs(F2FS_I_SB(inode), true);
3626
3627 inode_lock(inode);
3628
c6140415 3629 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
c75488fb
CY
3630 ret = -EINVAL;
3631 goto unlock_inode;
3632 }
3633
e4544b63 3634 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
edc6d01b 3635 filemap_invalidate_lock(inode->i_mapping);
c75488fb
CY
3636
3637 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3638
3639 while (page_idx < last_idx) {
3640 struct dnode_of_data dn;
3641 pgoff_t end_offset, count;
3642
3643 set_new_dnode(&dn, inode, NULL, NULL, 0);
3644 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3645 if (ret) {
3646 if (ret == -ENOENT) {
3647 page_idx = f2fs_get_next_page_offset(&dn,
3648 page_idx);
3649 ret = 0;
3650 continue;
3651 }
3652 break;
3653 }
3654
3655 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3656 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
4fec3fc0 3657 count = round_up(count, F2FS_I(inode)->i_cluster_size);
c75488fb
CY
3658
3659 ret = reserve_compress_blocks(&dn, count);
3660
3661 f2fs_put_dnode(&dn);
3662
3663 if (ret < 0)
3664 break;
3665
3666 page_idx += count;
3667 reserved_blocks += ret;
3668 }
3669
edc6d01b 3670 filemap_invalidate_unlock(inode->i_mapping);
e4544b63 3671 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
c75488fb
CY
3672
3673 if (ret >= 0) {
c6140415 3674 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
c75488fb
CY
3675 inode->i_ctime = current_time(inode);
3676 f2fs_mark_inode_dirty_sync(inode, true);
3677 }
3678unlock_inode:
3679 inode_unlock(inode);
3680out:
3681 mnt_drop_write_file(filp);
3682
3683 if (ret >= 0) {
3684 ret = put_user(reserved_blocks, (u64 __user *)arg);
c2759eba
DJ
3685 } else if (reserved_blocks &&
3686 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
c75488fb
CY
3687 set_sbi_flag(sbi, SBI_NEED_FSCK);
3688 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
c2759eba 3689 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
c75488fb
CY
3690 "run fsck to fix.",
3691 __func__, inode->i_ino, inode->i_blocks,
3692 reserved_blocks,
c2759eba 3693 atomic_read(&F2FS_I(inode)->i_compr_blocks));
c75488fb
CY
3694 }
3695
3696 return ret;
3697}
3698
9af84648
DJ
3699static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3700 pgoff_t off, block_t block, block_t len, u32 flags)
3701{
9af84648
DJ
3702 sector_t sector = SECTOR_FROM_BLOCK(block);
3703 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3704 int ret = 0;
3705
44abff2c
CH
3706 if (flags & F2FS_TRIM_FILE_DISCARD) {
3707 if (bdev_max_secure_erase_sectors(bdev))
3708 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3709 GFP_NOFS);
3710 else
3711 ret = blkdev_issue_discard(bdev, sector, nr_sects,
3712 GFP_NOFS);
3713 }
9af84648
DJ
3714
3715 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3716 if (IS_ENCRYPTED(inode))
3717 ret = fscrypt_zeroout_range(inode, off, block, len);
3718 else
3719 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3720 GFP_NOFS, 0);
3721 }
3722
3723 return ret;
3724}
3725
/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase (discard and/or zero-out) the
 * on-disk blocks backing a byte range of a regular file. Contiguous
 * extents on the same device are merged before being handed to
 * f2fs_secure_erase() to minimize bio submissions.
 */
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;	/* pending merged extent */
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* fscrypt zero-out cannot address per-device offsets. */
	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;	/* empty range: success, nothing to do */

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		/* range reaches (or passes) EOF: trim to the end */
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	/* Freeze block addresses (no GC moves, no new pages) while erasing. */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(mapping);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				/* hole: skip to the next allocated node page */
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				f2fs_handle_error(sbi,
						ERROR_INVALID_BLKADDR);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				/* make blkaddr device-relative */
				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					/* extends the pending extent */
					len++;
				} else {
					/* discontinuity: flush pending extent */
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				/* start a new pending extent */
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	/* Flush the final pending extent, if any. */
	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	filemap_invalidate_unlock(mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
3887
9e2a5f8c 3888static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
52656e6c 3889{
9e2a5f8c
DJ
3890 struct inode *inode = file_inode(filp);
3891 struct f2fs_comp_option option;
3892
3893 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3894 return -EOPNOTSUPP;
3895
3896 inode_lock_shared(inode);
3897
3898 if (!f2fs_compressed_file(inode)) {
3899 inode_unlock_shared(inode);
3900 return -ENODATA;
3901 }
3902
3903 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3904 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3905
3906 inode_unlock_shared(inode);
3907
3908 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3909 sizeof(option)))
3910 return -EFAULT;
3911
3912 return 0;
3913}
3914
e1e8debe
DJ
/*
 * F2FS_IOC_SET_COMPRESS_OPTION: change the compression algorithm and
 * cluster size of an empty compressed file. Rejects mmapped, dirty, or
 * non-empty files so existing clusters never mix configurations.
 */
static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_comp_option option;
	int ret = 0;

	if (!f2fs_sb_has_compression(sbi))
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
				sizeof(option)))
		return -EFAULT;

	if (!f2fs_compressed_file(inode) ||
			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
			option.algorithm >= COMPRESS_MAX)
		return -EINVAL;

	file_start_write(filp);
	inode_lock(inode);

	/* Pending writeback or mappings could still use the old layout. */
	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
		ret = -EBUSY;
		goto out;
	}

	/* Only an empty file may switch options. */
	if (inode->i_size != 0) {
		ret = -EFBIG;
		goto out;
	}

	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
	f2fs_mark_inode_dirty_sync(inode, true);

	/* Setting persists even if this kernel lacks the algorithm. */
	if (!f2fs_is_compress_backend_ready(inode))
		f2fs_warn(sbi, "compression algorithm is successfully set, "
			"but current kernel doesn't support this algorithm.");
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
3965
5fdb322f
DJ
/*
 * Read @len pages starting at @page_idx into the page cache and mark
 * them dirty, so a subsequent writeback rewrites them (used to re-encode
 * data when toggling per-file compression). Returns 0 or the first read
 * error; pages read before the error are still redirtied.
 */
static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	/* Batch the reads; read_cache_page() below then hits the cache. */
	page_cache_ra_unbounded(&ractl, len, 0);

	/* First pass: read pages up-to-date, taking a reference on each. */
	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;	/* only redirty what was actually read */
	}

	/* Second pass: lock, dirty, and drop both references. */
	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);

		/* It will never fail, when page has pinned above */
		f2fs_bug_on(F2FS_I_SB(inode), !page);

		set_page_dirty(page);
		f2fs_put_page(page, 1);	/* ref from find_lock_page (+unlock) */
		f2fs_put_page(page, 0);	/* ref from read_cache_page */
	}

	return ret;
}
3998
/*
 * F2FS_IOC_DECOMPRESS_FILE: rewrite every cluster of a user-mode
 * compressed file so its data is stored uncompressed. Works by
 * redirtying clusters and letting writeback store them (compression is
 * not re-applied because FI_ENABLE_COMPRESS is not set).
 */
static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = fi->i_cluster_size;
	int count, ret;

	/* Only meaningful with user-controlled compression mode. */
	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	if (!atomic_read(&fi->i_compr_blocks))
		goto out;	/* nothing is compressed */

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		/* Throttle: flush once a segment's worth of pages is dirty. */
		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
4071
/*
 * F2FS_IOC_COMPRESS_FILE: rewrite every cluster of a user-mode
 * compressed file with compression forced on (FI_ENABLE_COMPRESS is set
 * for the duration of the writeback).
 */
static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	int cluster_size = F2FS_I(inode)->i_cluster_size;
	int count, ret;

	/* Only meaningful with user-controlled compression mode. */
	if (!f2fs_sb_has_compression(sbi) ||
			F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	file_start_write(filp);
	inode_lock(inode);

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	/* Force writeback to compress the redirtied clusters. */
	set_inode_flag(inode, FI_ENABLE_COMPRESS);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	count = last_idx - page_idx;
	while (count) {
		int len = min(cluster_size, count);

		ret = redirty_blocks(inode, page_idx, len);
		if (ret < 0)
			break;

		/* Throttle: flush once a segment's worth of pages is dirty. */
		if (get_dirty_pages(inode) >= blk_per_seg)
			filemap_fdatawrite(inode->i_mapping);

		count -= len;
		page_idx += len;
	}

	if (!ret)
		ret = filemap_write_and_wait_range(inode->i_mapping, 0,
							LLONG_MAX);

	clear_inode_flag(inode, FI_ENABLE_COMPRESS);

	if (ret)
		f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
			  __func__, ret);
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
4144
/*
 * Dispatcher for all f2fs-private and shared FS_IOC_* ioctl commands.
 * Callers (f2fs_ioctl() / f2fs_compat_ioctl()) have already rejected
 * requests while the fs is in checkpoint-error or not-ready state.
 */
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp, false);
	case F2FS_IOC_START_ATOMIC_REPLACE:
		return f2fs_ioc_start_atomic_write(filp, true);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_ABORT_ATOMIC_WRITE:
		return f2fs_ioc_abort_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		/* volatile-write support was removed; codes kept for ABI. */
		return -EOPNOTSUPP;
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_READ_VERITY_METADATA:
		return f2fs_ioc_read_verity_metadata(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}
4235
34178b1b
CY
4236long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4237{
4238 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4239 return -EIO;
4240 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4241 return -ENOSPC;
4242
4243 return __f2fs_ioctl(filp, cmd, arg);
4244}
4245
a1e09b03
EB
4246/*
4247 * Return %true if the given read or write request should use direct I/O, or
4248 * %false if it should use buffered I/O.
4249 */
4250static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4251 struct iov_iter *iter)
4252{
4253 unsigned int align;
4254
4255 if (!(iocb->ki_flags & IOCB_DIRECT))
4256 return false;
4257
bd367329 4258 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
a1e09b03
EB
4259 return false;
4260
4261 /*
4262 * Direct I/O not aligned to the disk's logical_block_size will be
4263 * attempted, but will fail with -EINVAL.
4264 *
4265 * f2fs additionally requires that direct I/O be aligned to the
4266 * filesystem block size, which is often a stricter requirement.
4267 * However, f2fs traditionally falls back to buffered I/O on requests
4268 * that are logical_block_size-aligned but not fs-block aligned.
4269 *
4270 * The below logic implements this behavior.
4271 */
4272 align = iocb->ki_pos | iov_iter_alignment(iter);
4273 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4274 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4275 return false;
4276
4277 return true;
4278}
4279
4280static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4281 unsigned int flags)
4282{
4283 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4284
4285 dec_page_count(sbi, F2FS_DIO_READ);
4286 if (error)
4287 return error;
34a23525 4288 f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
a1e09b03
EB
4289 return 0;
4290}
4291
/* end_io-only dio ops; pairs with inc_page_count(F2FS_DIO_READ) in the submitter. */
static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
	.end_io = f2fs_dio_read_end_io,
};
4295
/*
 * Perform a direct read via iomap.  Holds i_gc_rwsem[READ] across the
 * submission to serialize against GC (trylock in the IOCB_NOWAIT case).
 */
static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	const loff_t pos = iocb->ki_pos;
	const size_t count = iov_iter_count(to);
	struct iomap_dio *dio;
	ssize_t ret;

	if (count == 0)
		return 0; /* skip atime update */

	trace_f2fs_direct_IO_enter(inode, iocb, count, READ);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		f2fs_down_read(&fi->i_gc_rwsem[READ]);
	}

	/*
	 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
	 * the higher-level function iomap_dio_rw() in order to ensure that the
	 * F2FS_DIO_READ counter will be decremented correctly in all cases.
	 */
	inc_page_count(sbi, F2FS_DIO_READ);
	dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
			     &f2fs_iomap_dio_read_ops, 0, NULL, 0);
	if (IS_ERR_OR_NULL(dio)) {
		ret = PTR_ERR_OR_ZERO(dio);
		/* end_io never ran for these errors; drop the counter here
		 * (except -EIOCBQUEUED, where completion is still pending). */
		if (ret != -EIOCBQUEUED)
			dec_page_count(sbi, F2FS_DIO_READ);
	} else {
		ret = iomap_dio_complete(dio);
	}

	f2fs_up_read(&fi->i_gc_rwsem[READ]);

	file_accessed(file);
out:
	trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
	return ret;
}
4344
/*
 * ->read_iter: route the request to the iomap direct-I/O path or buffered
 * filemap_read(), with optional dataread tracepoints around either path.
 */
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	const loff_t pos = iocb->ki_pos;
	ssize_t ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	if (trace_f2fs_dataread_start_enabled()) {
		char *p = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
		char *path;

		/* Tracing is best-effort: skip it on allocation failure. */
		if (!p)
			goto skip_read_trace;

		path = dentry_path_raw(file_dentry(iocb->ki_filp), p, PATH_MAX);
		if (IS_ERR(path)) {
			kfree(p);
			goto skip_read_trace;
		}

		trace_f2fs_dataread_start(inode, pos, iov_iter_count(to),
					current->pid, path, current->comm);
		kfree(p);
	}
skip_read_trace:
	if (f2fs_should_use_dio(inode, iocb, to)) {
		ret = f2fs_dio_read_iter(iocb, to);
	} else {
		ret = filemap_read(iocb, to, 0);
		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), inode,
						APP_BUFFERED_READ_IO, ret);
	}
	if (trace_f2fs_dataread_end_enabled())
		trace_f2fs_dataread_end(inode, pos, ret);
	return ret;
}
4384
a1e09b03
EB
4385static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4386{
4387 struct file *file = iocb->ki_filp;
4388 struct inode *inode = file_inode(file);
4389 ssize_t count;
4390 int err;
4391
4392 if (IS_IMMUTABLE(inode))
4393 return -EPERM;
4394
4395 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4396 return -EPERM;
4397
4398 count = generic_write_checks(iocb, from);
4399 if (count <= 0)
4400 return count;
4401
4402 err = file_modified(file);
4403 if (err)
4404 return err;
4405 return count;
4406}
4407
3d697a4a
EB
/*
 * Preallocate blocks for a write request, if it is possible and helpful to do
 * so. Returns a positive number if blocks may have been preallocated, 0 if no
 * blocks were preallocated, or a negative errno value if something went
 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
 * requested blocks (not just some of them) have been allocated.
 */
static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
				   bool dio)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	const loff_t pos = iocb->ki_pos;
	const size_t count = iov_iter_count(iter);
	struct f2fs_map_blocks map = {};
	int flag;
	int ret;

	/* If it will be an out-of-place direct write, don't bother. */
	if (dio && f2fs_lfs_mode(sbi))
		return 0;
	/*
	 * Don't preallocate holes aligned to DIO_SKIP_HOLES which turns into
	 * buffered IO, if DIO meets any holes.
	 */
	if (dio && i_size_read(inode) &&
		(F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
		return 0;

	/* No-wait I/O can't allocate blocks. */
	if (iocb->ki_flags & IOCB_NOWAIT)
		return 0;

	/* If it will be a short write, don't bother. */
	if (fault_in_iov_iter_readable(iter, count))
		return 0;

	if (f2fs_has_inline_data(inode)) {
		/* If the data will fit inline, don't bother. */
		if (pos + count <= MAX_INLINE_DATA(inode))
			return 0;
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	/* Do not preallocate blocks that will be written partially in 4KB. */
	map.m_lblk = F2FS_BLK_ALIGN(pos);
	map.m_len = F2FS_BYTES_TO_BLK(pos + count);
	/* m_len can be smaller than m_lblk for a sub-block-sized write. */
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;
	map.m_may_create = true;
	if (dio) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
		flag = F2FS_GET_BLOCK_PRE_DIO;
	} else {
		map.m_seg_type = NO_CHECK_TYPE;
		flag = F2FS_GET_BLOCK_PRE_AIO;
	}

	ret = f2fs_map_blocks(inode, &map, flag);
	/* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
	if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
		return ret;
	if (ret == 0)
		set_inode_flag(inode, FI_PREALLOCATED_ALL);
	return map.m_len;
}
4478
a1e09b03
EB
4479static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4480 struct iov_iter *from)
fcc85a4d 4481{
b439b103
JK
4482 struct file *file = iocb->ki_filp;
4483 struct inode *inode = file_inode(file);
a1e09b03
EB
4484 ssize_t ret;
4485
4486 if (iocb->ki_flags & IOCB_NOWAIT)
4487 return -EOPNOTSUPP;
4488
4489 current->backing_dev_info = inode_to_bdi(inode);
800ba295 4490 ret = generic_perform_write(iocb, from);
a1e09b03
EB
4491 current->backing_dev_info = NULL;
4492
4493 if (ret > 0) {
4494 iocb->ki_pos += ret;
34a23525
CY
4495 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4496 APP_BUFFERED_IO, ret);
a1e09b03
EB
4497 }
4498 return ret;
4499}
4500
4501static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4502 unsigned int flags)
4503{
4504 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4505
4506 dec_page_count(sbi, F2FS_DIO_WRITE);
4507 if (error)
4508 return error;
34a23525 4509 f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
a1e09b03
EB
4510 return 0;
4511}
4512
/* end_io-only dio ops; pairs with inc_page_count(F2FS_DIO_WRITE) in the submitter. */
static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
	.end_io = f2fs_dio_write_end_io,
};
4516
/*
 * Perform a direct write via iomap.  In LFS mode every direct write is
 * out-of-place (do_opu), which additionally requires i_gc_rwsem[READ] on
 * top of i_gc_rwsem[WRITE].  A partial direct write falls back to buffered
 * I/O for the remainder, then flushes and invalidates the affected page
 * cache range to preserve O_DIRECT semantics.
 */
static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
				   bool *may_need_sync)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	const bool do_opu = f2fs_lfs_mode(sbi);
	const loff_t pos = iocb->ki_pos;
	const ssize_t count = iov_iter_count(from);
	unsigned int dio_flags;
	struct iomap_dio *dio;
	ssize_t ret;

	trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* f2fs_convert_inline_inode() and block allocation can block */
		if (f2fs_has_inline_data(inode) ||
		    !f2fs_overwrite_io(inode, pos, count)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
			ret = -EAGAIN;
			goto out;
		}
		if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
			f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
			ret = -EAGAIN;
			goto out;
		}
	} else {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			goto out;

		/* Lock order: WRITE before READ, matching the trylock path. */
		f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
		if (do_opu)
			f2fs_down_read(&fi->i_gc_rwsem[READ]);
	}

	/*
	 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
	 * the higher-level function iomap_dio_rw() in order to ensure that the
	 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
	 */
	inc_page_count(sbi, F2FS_DIO_WRITE);
	dio_flags = 0;
	/* Size-extending writes must complete synchronously. */
	if (pos + count > inode->i_size)
		dio_flags |= IOMAP_DIO_FORCE_WAIT;
	dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
			     &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
	if (IS_ERR_OR_NULL(dio)) {
		ret = PTR_ERR_OR_ZERO(dio);
		if (ret == -ENOTBLK)
			ret = 0;
		/* end_io never ran; drop the counter here (except queued). */
		if (ret != -EIOCBQUEUED)
			dec_page_count(sbi, F2FS_DIO_WRITE);
	} else {
		ret = iomap_dio_complete(dio);
	}

	if (do_opu)
		f2fs_up_read(&fi->i_gc_rwsem[READ]);
	f2fs_up_read(&fi->i_gc_rwsem[WRITE]);

	if (ret < 0)
		goto out;
	if (pos + ret > inode->i_size)
		f2fs_i_size_write(inode, pos + ret);
	if (!do_opu)
		set_inode_flag(inode, FI_UPDATE_WRITE);

	if (iov_iter_count(from)) {
		ssize_t ret2;
		loff_t bufio_start_pos = iocb->ki_pos;

		/*
		 * The direct write was partial, so we need to fall back to a
		 * buffered write for the remainder.
		 */

		ret2 = f2fs_buffered_write_iter(iocb, from);
		if (iov_iter_count(from))
			f2fs_write_failed(inode, iocb->ki_pos);
		if (ret2 < 0)
			goto out;

		/*
		 * Ensure that the pagecache pages are written to disk and
		 * invalidated to preserve the expected O_DIRECT semantics.
		 */
		if (ret2 > 0) {
			loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;

			ret += ret2;

			ret2 = filemap_write_and_wait_range(file->f_mapping,
							    bufio_start_pos,
							    bufio_end_pos);
			if (ret2 < 0)
				goto out;
			invalidate_mapping_pages(file->f_mapping,
						 bufio_start_pos >> PAGE_SHIFT,
						 bufio_end_pos >> PAGE_SHIFT);
		}
	} else {
		/* iomap_dio_rw() already handled the generic_write_sync(). */
		*may_need_sync = false;
	}
out:
	trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
	return ret;
}
4633
/*
 * ->write_iter: run the common write checks, optionally preallocate blocks,
 * then perform a direct or buffered write.  Preallocated blocks left past
 * the final i_size are truncated away afterwards.
 */
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	const loff_t orig_pos = iocb->ki_pos;
	const size_t orig_count = iov_iter_count(from);
	loff_t target_size;
	bool dio;
	bool may_need_sync = true;
	int preallocated;
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = f2fs_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	/* Determine whether we will do a direct write or a buffered write. */
	dio = f2fs_should_use_dio(inode, iocb, from);

	/* Possibly preallocate the blocks for the write. */
	target_size = iocb->ki_pos + iov_iter_count(from);
	preallocated = f2fs_preallocate_blocks(iocb, from, dio);
	if (preallocated < 0) {
		ret = preallocated;
	} else {
		/* Tracing is best-effort: skipped on allocation failure. */
		if (trace_f2fs_datawrite_start_enabled()) {
			char *p = f2fs_kmalloc(F2FS_I_SB(inode),
						PATH_MAX, GFP_KERNEL);
			char *path;

			if (!p)
				goto skip_write_trace;
			path = dentry_path_raw(file_dentry(iocb->ki_filp),
								p, PATH_MAX);
			if (IS_ERR(path)) {
				kfree(p);
				goto skip_write_trace;
			}
			trace_f2fs_datawrite_start(inode, orig_pos, orig_count,
					current->pid, path, current->comm);
			kfree(p);
		}
skip_write_trace:
		/* Do the actual write. */
		ret = dio ?
			f2fs_dio_write_iter(iocb, from, &may_need_sync) :
			f2fs_buffered_write_iter(iocb, from);

		if (trace_f2fs_datawrite_end_enabled())
			trace_f2fs_datawrite_end(inode, orig_pos, ret);
	}

	/* Don't leave any preallocated blocks around past i_size. */
	if (preallocated && i_size_read(inode) < target_size) {
		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);
		if (!f2fs_truncate(inode))
			file_dont_truncate(inode);
		filemap_invalidate_unlock(inode->i_mapping);
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	} else {
		file_dont_truncate(inode);
	}

	clear_inode_flag(inode, FI_PREALLOCATED_ALL);
out_unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
	if (ret > 0 && may_need_sync)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
4725
0f6b56ec
DJ
/*
 * ->fadvise: POSIX_FADV_SEQUENTIAL scales this file's readahead window by
 * the per-sb seq_file_ra_mul tunable; other advice goes to
 * generic_fadvise(), and POSIX_FADV_DONTNEED additionally drops cached
 * compressed pages when the COMPRESS_CACHE mount option is active.
 */
static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
		int advice)
{
	struct address_space *mapping;
	struct backing_dev_info *bdi;
	struct inode *inode = file_inode(filp);
	int err;

	if (advice == POSIX_FADV_SEQUENTIAL) {
		if (S_ISFIFO(inode->i_mode))
			return -ESPIPE;

		mapping = filp->f_mapping;
		if (!mapping || len < 0)
			return -EINVAL;

		/* Boost readahead instead of calling generic_fadvise(). */
		bdi = inode_to_bdi(mapping->host);
		filp->f_ra.ra_pages = bdi->ra_pages *
			F2FS_I_SB(inode)->seq_file_ra_mul;
		spin_lock(&filp->f_lock);
		filp->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&filp->f_lock);
		return 0;
	}

	err = generic_fadvise(filp, offset, len, advice);
	if (!err && advice == POSIX_FADV_DONTNEED &&
		test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
		f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);

	return err;
}
4759
e9750824 4760#ifdef CONFIG_COMPAT
34178b1b
CY
/* 32-bit layout of struct f2fs_gc_range (64-bit fields are compat_u64). */
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

/*
 * Copy a 32-bit GC-range argument field by field into the native struct
 * and invoke the native handler.
 */
static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}
4784
/* 32-bit layout of struct f2fs_move_range (64-bit fields are compat_u64). */
struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

/*
 * Copy a 32-bit move-range argument field by field into the native struct
 * and invoke the native handler.
 */
static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}
4810
e9750824
NJ
/*
 * 32-bit compat ioctl entry.  Commands whose argument layout differs on
 * 32-bit (GC range, move range) get dedicated translators; commands with a
 * layout-compatible argument fall through to __f2fs_ioctl() with the
 * pointer converted via compat_ptr().  Unknown commands get -ENOIOCTLCMD.
 */
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	/* The remaining commands share their layout with 64-bit userspace. */
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_ATOMIC_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
4870#endif
4871
/* File operations for regular f2fs files. */
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fadvise	= f2fs_file_fadvise,
};