// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
#include <linux/iomap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	ret = filemap_fault(vmf);
	if (ret & VM_FAULT_LOCKED)
		f2fs_update_iostat(F2FS_I_SB(inode), inode,
					APP_MAPPED_READ_IO, F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
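
/*
 * Make a mmapped page writable: allocate the backing block if needed,
 * wait for any in-flight writeback, zero the part beyond i_size, and
 * dirty the page.  Serialized against invalidation via
 * filemap_invalidate_lock_shared().
 */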
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			need_alloc = false;
		}
	}
#endif
	/* should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	filemap_invalidate_lock_shared(inode->i_mapping);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/* check to see if the page is mapped already (no holes) */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	filemap_invalidate_unlock_shared(inode->i_mapping);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
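
/*
 * Fetch the parent directory's inode number through a still-hashed
 * dentry alias; returns 0 if no such alias exists.
 */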
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
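
/*
 * Decide whether this fsync can be served by roll-forward node logging
 * alone or must fall back to a full checkpoint; returns the first
 * matching checkpoint reason, or CP_NO_NEEDED.
 */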
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
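
/* Check whether the inode still has node-page or inode-block updates pending. */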
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* but we still need to catch pending inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	f2fs_down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	f2fs_up_write(&fi->i_sem);
}
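
/*
 * Core of fsync/fdatasync: flush dirty data pages, then either issue a
 * full checkpoint (when need_do_checkpoint() says so) or log the dirty
 * node pages for roll-forward recovery, and finally flush the device
 * cache according to fsync_mode.
 */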
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	} else {
		/*
		 * in the OPU case, during fsync(), the node can be persisted
		 * before the data when the lower device doesn't support write
		 * barriers, resulting in data corruption after SPO.
		 * So in strict fsync mode, force atomic write semantics to
		 * keep the write order between data/node and the last node
		 * and avoid potential data corruption.
		 */
		if (F2FS_OPTION(sbi).fsync_mode ==
				FSYNC_MODE_STRICT && !atomic)
			atomic = true;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	f2fs_down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	f2fs_up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. The following
		 * pino will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid an infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks in the chain.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if ((!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ||
	    (atomic && !test_opt(sbi, NOBARRIER) && f2fs_sb_has_blkzoned(sbi)))
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
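
/*
 * Helper for lseek(): tell whether the block at @index satisfies @whence,
 * i.e. a valid (or dirty reserved) block for SEEK_DATA, or an unallocated
 * one for SEEK_HOLE.
 */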
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
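
/* Scan dnode blocks from @offset looking for the next data block or hole. */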
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
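
/* Hook up f2fs's vm_ops so page faults and mkwrite go through f2fs. */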
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
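
/*
 * Free @count block addresses in the dnode starting at dn->ofs_in_node:
 * invalidate every valid blkaddr, fix up compressed-cluster accounting,
 * and shrink the extent caches and the valid block count accordingly.
 */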
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * we have invalidated the valid blkaddrs in [ofs, ofs + count];
		 * now update the extent cache for the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
		f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
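
/*
 * Zero the tail of the page straddling the new EOF so no stale bytes
 * remain past @from; @cache_only limits this to pages already cached.
 */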
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have its key loaded and must truncate
	 * the last page here, never via the cache-only path. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));

	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
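
/*
 * Truncate all blocks at or beyond @from: inline data is trimmed in the
 * inode page, the boundary dnode is partially freed, and every node
 * block past it is dropped via f2fs_truncate_inode_blocks().
 */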
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed files, only cluster-size-aligned truncation
	 * is supported.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * For a compressed file, direct writes are disallowed after its
	 * compressed blocks have been released, but they should be allowed
	 * again once the file is truncated to zero.
	 */
	if (f2fs_compressed_file(inode) && !free_from
			&& is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
		clear_inode_flag(inode, FI_COMPRESS_RELEASED);

	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
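
/* Return true when direct IO is unusable and must fall back to buffered IO. */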
static bool f2fs_force_buffered_io(struct inode *inode, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!fscrypt_dio_supported(inode))
		return true;
	if (fsverity_active(inode))
		return true;
	if (f2fs_compressed_file(inode))
		return true;

	/* disallow direct IO if any device has an unaligned blksize */
	if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
		return true;
	/*
	 * for zoned block devices, fall back from direct IO to buffered IO
	 * so that all IOs can be serialized by log-structured writes.
	 */
	if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
		return true;
	if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
		return true;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		return true;

	return false;
}

int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = NULL;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	/*
	 * Return the DIO alignment restrictions if requested.  We only return
	 * this information when requested, since on encrypted files it might
	 * take a fair bit of work to get if the file wasn't opened recently.
	 *
	 * f2fs sometimes supports DIO reads but not DIO writes.  STATX_DIOALIGN
	 * cannot represent that, so in that case we report no DIO support.
	 */
	if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) {
		unsigned int bsize = i_blocksize(inode);

		stat->result_mask |= STATX_DIOALIGN;
		if (!f2fs_force_buffered_io(inode, WRITE)) {
			stat->dio_mem_align = bsize;
			stat->dio_offset_align = bsize;
		}
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(idmap, inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct mnt_idmap *idmap,
			   struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;
	struct user_namespace *mnt_userns = mnt_idmap_owner(idmap);

	i_uid_update(mnt_userns, attr, inode);
	i_gid_update(mnt_userns, attr, inode);
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;
		vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);

		if (!vfsgid_in_group_p(vfsgid) &&
		    !capable_wrt_inode_uidgid(idmap, inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		 struct iattr *attr)
{
	struct user_namespace *mnt_userns = mnt_idmap_owner(idmap);
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(mnt_userns, inode, attr)) {
		err = f2fs_dquot_initialize(inode);
		if (err)
			return err;
	}
	if (i_uid_needs_update(mnt_userns, attr, inode) ||
	    i_gid_needs_update(mnt_userns, attr, inode)) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(mnt_userns, inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		i_uid_update(mnt_userns, attr, inode);
		i_gid_update(mnt_userns, attr, inode);
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * convert the inline inode before i_size_write, so
			 * a size above the inline_data capacity never
			 * coexists with the inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(idmap, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_inode_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};
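
/* Zero the sub-page range [start, start + len) by dirtying the data page. */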
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
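
/*
 * Implement FALLOC_FL_PUNCH_HOLE: zero the partial pages at both edges
 * and free every whole block in between.
 */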
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(inode->i_mapping);

			truncate_pagecache_range(inode, blk_start, blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			filemap_invalidate_unlock(inode->i_mapping);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
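
/*
 * Move the blocks recorded by __read_out_blkaddrs() into @dst_inode:
 * replace block addresses directly where possible, otherwise copy the
 * page contents and punch the source hole.
 */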
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
						blkaddr[i], ni.version, true, false);
					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
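
/*
 * Exchange a range of blocks between two inodes in bounded batches,
 * rolling the already-moved addresses back on failure.
 */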
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(inode->i_mapping);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	filemap_invalidate_unlock(inode->i_mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to the block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(inode->i_mapping);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	filemap_invalidate_unlock(inode->i_mapping);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
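
/*
 * Reserve blocks for [start, end) and convert each allocated address to
 * NEW_ADDR (a preallocated, zeroed block), invalidating the old one.
 */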
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}

		if (dn->data_blkaddr == NEW_ADDR)
			continue;

		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
					DATA_GENERIC_ENHANCE)) {
			ret = -EFSCORRUPTED;
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			break;
		}

		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
		dn->data_blkaddr = NEW_ADDR;
		f2fs_set_data_blkaddr(dn);
	}

	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			filemap_invalidate_lock(mapping);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				filemap_invalidate_unlock(mapping);
				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			filemap_invalidate_unlock(mapping);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
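
/*
 * Implement FALLOC_FL_INSERT_RANGE: shift all blocks from @offset
 * rightwards by @len (working back-to-front) and grow i_size.
 */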
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to the block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	filemap_invalidate_lock(mapping);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	filemap_invalidate_unlock(mapping);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	filemap_invalidate_lock(mapping);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	filemap_invalidate_unlock(mapping);
	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	filemap_invalidate_lock(mapping);
	filemap_write_and_wait_range(mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	filemap_invalidate_unlock(mapping);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
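
/*
 * Preallocate blocks for fallocate(); pinned files are expanded one
 * section at a time from the CURSEG_COLD_DATA_PINNED log.
 */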
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.nr_free_secs = 0 };
	pgoff_t pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	block_t expanded = 0;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t sec_blks = CAP_BLKS_PER_SEC(sbi);
		block_t sec_len = roundup(map.m_len, sec_blks);

		map.m_len = sec_blks;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			f2fs_down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, &gc_control);
			if (err && err != -ENODATA)
				goto out_err;
		}

		f2fs_down_write(&sbi->pin_sem);

		f2fs_lock_op(sbi);
		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
		f2fs_unlock_op(sbi);

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
		file_dont_truncate(inode);

		f2fs_up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	/*
	 * Pinned files do not support partial truncation since their blocks
	 * may be in use by applications.
	 */
	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	ret = file_modified(file);
	if (ret)
		goto out;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file() is called on every close. So we should not
	 * drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	f2fs_abort_atomic_write(inode, true);
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this should be done
	 * before dropping the file lock, it needs to happen in ->flush.
	 */
	if (F2FS_I(inode)->atomic_write_task == current)
		f2fs_abort_atomic_write(inode, true);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	/* mask can be shrunk by flags_valid selector */
	iflags &= mask;

	/* Is it a quota file? Do not allow the user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (!f2fs_disable_compressed_file(inode))
				return -EINVAL;
		} else {
			/* try to convert inline_data to support compression */
			int err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
			if (!f2fs_may_compress(inode))
				return -EINVAL;
			if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
				return -EINVAL;
			if (set_compress_context(inode))
				return -EOPNOTSUPP;
		}
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

/* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 *
 * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and
 * FS_IOC_FSSETXATTR is done by the VFS.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate)
{
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inode *pinode;
	loff_t isize;
	int ret;

	if (!inode_owner_or_capable(idmap, inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EINVAL;
		goto out;
	}

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);

	/*
	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
	 * correctly via f2fs_is_atomic_file().
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		goto out;
	}

	/* Create a COW inode for atomic write */
	pinode = f2fs_iget(inode->i_sb, fi->i_pino);
	if (IS_ERR(pinode)) {
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		ret = PTR_ERR(pinode);
		goto out;
	}

	ret = f2fs_get_tmpfile(idmap, pinode, &fi->cow_inode);
	iput(pinode);
	if (ret) {
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		goto out;
	}

	f2fs_write_inode(inode, NULL);

	stat_inc_atomic_inode(inode);

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(fi->cow_inode, FI_COW_FILE);
	clear_inode_flag(fi->cow_inode, FI_INLINE_DATA);

	isize = i_size_read(inode);
	fi->original_i_size = isize;
	if (truncate) {
		set_inode_flag(inode, FI_ATOMIC_REPLACE);
		truncate_inode_pages_final(inode->i_mapping);
		f2fs_i_size_write(inode, 0);
		isize = 0;
	}
	f2fs_i_size_write(fi->cow_inode, isize);

	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_update_time(sbi, REQ_TIME);
	fi->atomic_write_task = current;
	stat_update_max_atomic_write(inode);
	fi->atomic_write_cnt = 0;
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
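
/*
 * Commit an atomic write: fold the COW inode's blocks back into the
 * original file and fsync the result, or abort on failure.
 */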
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);
	int ret;

	if (!inode_owner_or_capable(idmap, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_atomic_write(inode);
		if (!ret)
			ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);

		f2fs_abort_atomic_write(inode, ret);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct mnt_idmap *idmap = file_mnt_idmap(filp);
	int ret;

	if (!inode_owner_or_capable(idmap, inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_abort_atomic_write(inode, true);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false,
						STOP_CP_REASON_SHUTDOWN);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		ret = freeze_bdev(sb->s_bdev);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   bdev_discard_granularity(sb->s_bdev));
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	f2fs_down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	f2fs_up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO,
			.no_bg_gc = false,
			.should_migrate_blocks = false,
			.nr_free_secs = 0 };
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	gc_control.init_gc_type = sync ? FG_GC : BG_GC;
	gc_control.err_gc_skipped = sync;
	ret = f2fs_gc(sbi, &gc_control);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	struct f2fs_gc_control gc_control = {
			.init_gc_type = range->sync ? FG_GC : BG_GC,
			.no_bg_gc = false,
			.should_migrate_blocks = false,
			.err_gc_skipped = range->sync,
			.nr_free_secs = 0 };
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range->start + range->len;
	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range->sync) {
		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		f2fs_down_write(&sbi->gc_lock);
	}

	gc_control.victim_segno = GET_SEGNO(sbi, range->start);
	ret = f2fs_gc(sbi, &gc_control);
	if (ret) {
		if (ret == -EBUSY)
			ret = -EAGAIN;
		goto out;
	}
	range->start += CAP_BLKS_PER_SEC(sbi);
	if (range->start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
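
/*
 * Defragment [range->start, range->start + range->len): skip if the
 * blocks are already contiguous, otherwise redirty them in segment-sized
 * batches so that writeback reallocates them sequentially.
 */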
2556 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2558 struct f2fs_defragment *range)
2560 struct inode *inode = file_inode(filp);
2561 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2562 .m_seg_type = NO_CHECK_TYPE,
2563 .m_may_create = false };
2564 struct extent_info ei = {0, };
2565 pgoff_t pg_start, pg_end, next_pgofs;
2566 unsigned int blk_per_seg = sbi->blocks_per_seg;
2567 unsigned int total = 0, sec_num;
2568 block_t blk_end = 0;
2569 bool fragmented = false;
2572 pg_start = range->start >> PAGE_SHIFT;
2573 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2575 f2fs_balance_fs(sbi, true);
2579 /* if in-place-update policy is enabled, don't waste time here */
2580 set_inode_flag(inode, FI_OPU_WRITE);
2581 if (f2fs_should_update_inplace(inode, NULL)) {
2586 /* writeback all dirty pages in the range */
2587 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2588 range->start + range->len - 1);
2593 * lookup mapping info in extent cache, skip defragmenting if physical
2594 * block addresses are continuous.
2596 if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
2597 if (ei.fofs + ei.len >= pg_end)
2601 map.m_lblk = pg_start;
2602 map.m_next_pgofs = &next_pgofs;
2605 * lookup mapping info in dnode page cache, skip defragmenting if all
2606 * physical block addresses are continuous even if there are hole(s)
2607 * in logical blocks.
2609 while (map.m_lblk < pg_end) {
2610 map.m_len = pg_end - map.m_lblk;
2611 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2615 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2616 map.m_lblk = next_pgofs;
2620 if (blk_end && blk_end != map.m_pblk)
2623 /* record total count of blocks that we're going to move */
2626 blk_end = map.m_pblk + map.m_len;
2628 map.m_lblk += map.m_len;
2636 sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi));
2639 * make sure there are enough free sections for LFS allocation, this can
2640 * avoid defragmenting in SSR mode when free sections are allocated
2643 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2648 map.m_lblk = pg_start;
2649 map.m_len = pg_end - pg_start;
2652 while (map.m_lblk < pg_end) {
2657 map.m_len = pg_end - map.m_lblk;
2658 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2662 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2663 map.m_lblk = next_pgofs;
2667 set_inode_flag(inode, FI_SKIP_WRITES);
2670 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2673 page = f2fs_get_lock_data_page(inode, idx, true);
2675 err = PTR_ERR(page);
2679 set_page_dirty(page);
2680 set_page_private_gcing(page);
2681 f2fs_put_page(page, 1);
2690 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2693 clear_inode_flag(inode, FI_SKIP_WRITES);
2695 err = filemap_fdatawrite(inode->i_mapping);
2700 clear_inode_flag(inode, FI_SKIP_WRITES);
2702 clear_inode_flag(inode, FI_OPU_WRITE);
2703 inode_unlock(inode);
2705 range->len = (u64)total << PAGE_SHIFT;
2709 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2711 struct inode *inode = file_inode(filp);
2712 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2713 struct f2fs_defragment range;
2716 if (!capable(CAP_SYS_ADMIN))
2719 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2722 if (f2fs_readonly(sbi->sb))
2725 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2729 /* verify alignment of offset & size */
2730 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2733 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2734 max_file_blocks(inode)))
2737 err = mnt_want_write_file(filp);
2741 err = f2fs_defragment_range(sbi, filp, &range);
2742 mnt_drop_write_file(filp);
2744 f2fs_update_time(sbi, REQ_TIME);
2748 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2749 sizeof(range)))
2750 err = -EFAULT;
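/*
 * Editor's illustrative sketch: start/len are byte offsets and must be
 * F2FS_BLKSIZE-aligned, per the check above; on success the kernel writes
 * range.len back as the number of bytes it actually redirtied for moving.
 *
 *	struct f2fs_defragment def = { .start = 0, .len = 16 << 20 };
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &def) == 0)
 *		printf("queued %llu bytes\n", (unsigned long long)def.len);
 */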
2755 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2756 struct file *file_out, loff_t pos_out, size_t len)
2758 struct inode *src = file_inode(file_in);
2759 struct inode *dst = file_inode(file_out);
2760 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2761 size_t olen = len, dst_max_i_size = 0;
2765 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2766 src->i_sb != dst->i_sb)
2769 if (unlikely(f2fs_readonly(src->i_sb)))
2772 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2775 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2778 if (pos_out < 0 || pos_in < 0)
2782 if (pos_in == pos_out)
2784 if (pos_out > pos_in && pos_out < pos_in + len)
2791 if (!inode_trylock(dst))
2796 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2799 olen = len = src->i_size - pos_in;
2800 if (pos_in + len == src->i_size)
2801 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2807 dst_osize = dst->i_size;
2808 if (pos_out + olen > dst->i_size)
2809 dst_max_i_size = pos_out + olen;
2811 /* verify the end result is block aligned */
2812 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2813 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2814 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2817 ret = f2fs_convert_inline_inode(src);
2821 ret = f2fs_convert_inline_inode(dst);
2825 /* write out all dirty pages from offset */
2826 ret = filemap_write_and_wait_range(src->i_mapping,
2827 pos_in, pos_in + len);
2831 ret = filemap_write_and_wait_range(dst->i_mapping,
2832 pos_out, pos_out + len);
2836 f2fs_balance_fs(sbi, true);
2838 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2841 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2846 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2847 pos_out >> F2FS_BLKSIZE_BITS,
2848 len >> F2FS_BLKSIZE_BITS, false);
2852 f2fs_i_size_write(dst, dst_max_i_size);
2853 else if (dst_osize != dst->i_size)
2854 f2fs_i_size_write(dst, dst_osize);
2856 f2fs_unlock_op(sbi);
2859 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2861 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2870 static int __f2fs_ioc_move_range(struct file *filp,
2871 struct f2fs_move_range *range)
2876 if (!(filp->f_mode & FMODE_READ) ||
2877 !(filp->f_mode & FMODE_WRITE))
2880 dst = fdget(range->dst_fd);
2884 if (!(dst.file->f_mode & FMODE_WRITE)) {
2889 err = mnt_want_write_file(filp);
2893 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2894 range->pos_out, range->len);
2896 mnt_drop_write_file(filp);
2902 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2904 struct f2fs_move_range range;
2906 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2909 return __f2fs_ioc_move_range(filp, &range);
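/*
 * Editor's illustrative sketch: both fds must be regular files on the same
 * f2fs mount, pos_in/pos_out/len block-aligned, and the two ranges must not
 * overlap, per the checks in f2fs_move_file_range() above.
 *
 *	struct f2fs_move_range mv = {
 *		.dst_fd = dst_fd,		(hypothetical writable fd)
 *		.pos_in = 0,
 *		.pos_out = 0,
 *		.len = 1 << 20,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mv);
 */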
2912 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2914 struct inode *inode = file_inode(filp);
2915 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2916 struct sit_info *sm = SIT_I(sbi);
2917 unsigned int start_segno = 0, end_segno = 0;
2918 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2919 struct f2fs_flush_device range;
2920 struct f2fs_gc_control gc_control = {
2921 .init_gc_type = FG_GC,
2922 .should_migrate_blocks = true,
2923 .err_gc_skipped = true,
2924 .nr_free_secs = 0 };
2927 if (!capable(CAP_SYS_ADMIN))
2930 if (f2fs_readonly(sbi->sb))
2933 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2936 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2940 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2941 __is_large_section(sbi)) {
2942 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2943 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2947 ret = mnt_want_write_file(filp);
2951 if (range.dev_num != 0)
2952 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2953 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2955 start_segno = sm->last_victim[FLUSH_DEVICE];
2956 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2957 start_segno = dev_start_segno;
2958 end_segno = min(start_segno + range.segments, dev_end_segno);
2960 while (start_segno < end_segno) {
2961 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2965 sm->last_victim[GC_CB] = end_segno + 1;
2966 sm->last_victim[GC_GREEDY] = end_segno + 1;
2967 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2969 gc_control.victim_segno = start_segno;
2970 ret = f2fs_gc(sbi, &gc_control);
2978 mnt_drop_write_file(filp);
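/*
 * Editor's illustrative sketch: only meaningful on a multi-device fs with
 * one segment per section, per the guard above; dev_num must not name the
 * last device. Up to 'segments' segments are migrated off the device by
 * foreground GC with should_migrate_blocks set.
 *
 *	struct f2fs_flush_device fl = { .dev_num = 1, .segments = 512 };
 *	ioctl(fd, F2FS_IOC_FLUSH_DEVICE, &fl);
 */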
2982 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2984 struct inode *inode = file_inode(filp);
2985 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2987 /* Advertise atomic-write support unconditionally; Android's SQLite expects it. */
2988 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2990 return put_user(sb_feature, (u32 __user *)arg);
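/*
 * Editor's illustrative sketch: the result is a bitmask of F2FS_FEATURE_*
 * flags read from the superblock (with the atomic-write bit forced on, as
 * above).
 *
 *	__u32 feat = 0;
 *	ioctl(fd, F2FS_IOC_GET_FEATURES, &feat);
 *	if (feat & F2FS_FEATURE_ATOMIC_WRITE)
 *		use_atomic_writes();		(hypothetical helper)
 */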
2994 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
2996 struct dquot *transfer_to[MAXQUOTAS] = {};
2997 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2998 struct super_block *sb = sbi->sb;
3001 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3002 if (!IS_ERR(transfer_to[PRJQUOTA])) {
3003 err = __dquot_transfer(inode, transfer_to);
3005 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3006 dqput(transfer_to[PRJQUOTA]);
3011 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3013 struct f2fs_inode_info *fi = F2FS_I(inode);
3014 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3015 struct f2fs_inode *ri = NULL;
3019 if (!f2fs_sb_has_project_quota(sbi)) {
3020 if (projid != F2FS_DEF_PROJID)
3026 if (!f2fs_has_extra_attr(inode))
3029 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3031 if (projid_eq(kprojid, fi->i_projid))
3035 /* Is it quota file? Do not allow user to mess with it */
3036 if (IS_NOQUOTA(inode))
3039 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3042 err = f2fs_dquot_initialize(inode);
3047 err = f2fs_transfer_project_quota(inode, kprojid);
3051 fi->i_projid = kprojid;
3052 inode->i_ctime = current_time(inode);
3053 f2fs_mark_inode_dirty_sync(inode, true);
3055 f2fs_unlock_op(sbi);
3059 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3064 static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
3066 if (projid != F2FS_DEF_PROJID)
3072 int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
3074 struct inode *inode = d_inode(dentry);
3075 struct f2fs_inode_info *fi = F2FS_I(inode);
3076 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
3078 if (IS_ENCRYPTED(inode))
3079 fsflags |= FS_ENCRYPT_FL;
3080 if (IS_VERITY(inode))
3081 fsflags |= FS_VERITY_FL;
3082 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
3083 fsflags |= FS_INLINE_DATA_FL;
3084 if (is_inode_flag_set(inode, FI_PIN_FILE))
3085 fsflags |= FS_NOCOW_FL;
3087 fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL);
3089 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3090 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3095 int f2fs_fileattr_set(struct mnt_idmap *idmap,
3096 struct dentry *dentry, struct fileattr *fa)
3098 struct inode *inode = d_inode(dentry);
3099 u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL;
3103 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
3105 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
3107 if (fsflags & ~F2FS_GETTABLE_FS_FL)
3109 fsflags &= F2FS_SETTABLE_FS_FL;
3110 if (!fa->flags_valid)
3111 mask &= FS_COMMON_FL;
3113 iflags = f2fs_fsflags_to_iflags(fsflags);
3114 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3117 err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask));
3119 err = f2fs_ioc_setproject(inode, fa->fsx_projid);
3124 int f2fs_pin_file_control(struct inode *inode, bool inc)
3126 struct f2fs_inode_info *fi = F2FS_I(inode);
3127 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3129 /* Use i_gc_failures for normal file as a risk signal. */
3131 f2fs_i_gc_failures_write(inode,
3132 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3134 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3135 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3136 __func__, inode->i_ino,
3137 fi->i_gc_failures[GC_FAILURE_PIN]);
3138 clear_inode_flag(inode, FI_PIN_FILE);
3144 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3146 struct inode *inode = file_inode(filp);
3150 if (get_user(pin, (__u32 __user *)arg))
3153 if (!S_ISREG(inode->i_mode))
3156 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3159 ret = mnt_want_write_file(filp);
3166 clear_inode_flag(inode, FI_PIN_FILE);
3167 f2fs_i_gc_failures_write(inode, 0);
3171 if (f2fs_should_update_outplace(inode, NULL)) {
3176 if (f2fs_pin_file_control(inode, false)) {
3181 ret = f2fs_convert_inline_inode(inode);
3185 if (!f2fs_disable_compressed_file(inode)) {
3190 set_inode_flag(inode, FI_PIN_FILE);
3191 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3193 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3195 inode_unlock(inode);
3196 mnt_drop_write_file(filp);
3200 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3202 struct inode *inode = file_inode(filp);
3205 if (is_inode_flag_set(inode, FI_PIN_FILE))
3206 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3207 return put_user(pin, (u32 __user *)arg);
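/*
 * Editor's illustrative sketch: pinning keeps a regular file's blocks in
 * place (GC skips migrating them) until too many GC trials fail, per
 * f2fs_pin_file_control() above; GET returns the pinned file's GC-failure
 * count, or 0 when the file is not pinned.
 *
 *	__u32 pin = 1, failures = 0;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures);
 */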
3210 int f2fs_precache_extents(struct inode *inode)
3212 struct f2fs_inode_info *fi = F2FS_I(inode);
3213 struct f2fs_map_blocks map;
3214 pgoff_t m_next_extent;
3218 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3222 map.m_next_pgofs = NULL;
3223 map.m_next_extent = &m_next_extent;
3224 map.m_seg_type = NO_CHECK_TYPE;
3225 map.m_may_create = false;
3226 end = max_file_blocks(inode);
3228 while (map.m_lblk < end) {
3229 map.m_len = end - map.m_lblk;
3231 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3232 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3233 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3237 map.m_lblk = m_next_extent;
3243 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3245 return f2fs_precache_extents(file_inode(filp));
3248 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3250 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3253 if (!capable(CAP_SYS_ADMIN))
3256 if (f2fs_readonly(sbi->sb))
3259 if (copy_from_user(&block_count, (void __user *)arg,
3260 sizeof(block_count)))
3263 return f2fs_resize_fs(sbi, block_count);
3266 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3268 struct inode *inode = file_inode(filp);
3270 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3272 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3273 f2fs_warn(F2FS_I_SB(inode),
3274 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3279 return fsverity_ioctl_enable(filp, (const void __user *)arg);
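/*
 * Editor's illustrative sketch using the generic fs-verity uapi from
 * <linux/fsverity.h> (not f2fs-specific); the file must be read-only when
 * verity is enabled:
 *
 *	struct fsverity_enable_arg arg = { 0 };
 *	arg.version = 1;
 *	arg.hash_algorithm = FSVERITY_HASH_ALG_SHA256;
 *	arg.block_size = 4096;
 *	ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
 */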
3282 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3284 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3287 return fsverity_ioctl_measure(filp, (void __user *)arg);
3290 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3292 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3295 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3298 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3300 struct inode *inode = file_inode(filp);
3301 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3306 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3310 f2fs_down_read(&sbi->sb_lock);
3311 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3312 ARRAY_SIZE(sbi->raw_super->volume_name),
3313 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3314 f2fs_up_read(&sbi->sb_lock);
3316 if (copy_to_user((char __user *)arg, vbuf,
3317 min(FSLABEL_MAX, count)))
3324 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3326 struct inode *inode = file_inode(filp);
3327 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3331 if (!capable(CAP_SYS_ADMIN))
3334 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3336 return PTR_ERR(vbuf);
3338 err = mnt_want_write_file(filp);
3342 f2fs_down_write(&sbi->sb_lock);
3344 memset(sbi->raw_super->volume_name, 0,
3345 sizeof(sbi->raw_super->volume_name));
3346 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3347 sbi->raw_super->volume_name,
3348 ARRAY_SIZE(sbi->raw_super->volume_name));
3350 err = f2fs_commit_super(sbi, false);
3352 f2fs_up_write(&sbi->sb_lock);
3354 mnt_drop_write_file(filp);
3360 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3362 struct inode *inode = file_inode(filp);
3365 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3368 if (!f2fs_compressed_file(inode))
3371 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3372 return put_user(blocks, (u64 __user *)arg);
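/*
 * Editor's illustrative sketch of the release/reserve lifecycle handled by
 * the helpers below: releasing returns the space saved by compression to
 * the free pool and marks the file FI_COMPRESS_RELEASED (no further
 * writes); reserving the blocks back makes it writable again. Each call
 * reports the affected block count through the same __u64 argument.
 *
 *	__u64 blocks = 0;
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks);
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks);
 *	...
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks);
 */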
3375 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3377 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3378 unsigned int released_blocks = 0;
3379 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3383 for (i = 0; i < count; i++) {
3384 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3385 dn->ofs_in_node + i);
3387 if (!__is_valid_data_blkaddr(blkaddr))
3389 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3390 DATA_GENERIC_ENHANCE))) {
3391 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3392 return -EFSCORRUPTED;
3397 int compr_blocks = 0;
3399 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3400 blkaddr = f2fs_data_blkaddr(dn);
3403 if (blkaddr == COMPRESS_ADDR)
3405 dn->ofs_in_node += cluster_size;
3409 if (__is_valid_data_blkaddr(blkaddr))
3412 if (blkaddr != NEW_ADDR)
3415 dn->data_blkaddr = NULL_ADDR;
3416 f2fs_set_data_blkaddr(dn);
3419 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3420 dec_valid_block_count(sbi, dn->inode,
3421 cluster_size - compr_blocks);
3423 released_blocks += cluster_size - compr_blocks;
3425 count -= cluster_size;
3428 return released_blocks;
3431 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3433 struct inode *inode = file_inode(filp);
3434 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3435 pgoff_t page_idx = 0, last_idx;
3436 unsigned int released_blocks = 0;
3440 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3443 if (!f2fs_compressed_file(inode))
3446 if (f2fs_readonly(sbi->sb))
3449 ret = mnt_want_write_file(filp);
3453 f2fs_balance_fs(F2FS_I_SB(inode), true);
3457 writecount = atomic_read(&inode->i_writecount);
3458 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3459 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3464 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3469 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3473 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3474 inode->i_ctime = current_time(inode);
3475 f2fs_mark_inode_dirty_sync(inode, true);
3477 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3480 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3481 filemap_invalidate_lock(inode->i_mapping);
3483 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3485 while (page_idx < last_idx) {
3486 struct dnode_of_data dn;
3487 pgoff_t end_offset, count;
3489 set_new_dnode(&dn, inode, NULL, NULL, 0);
3490 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3492 if (ret == -ENOENT) {
3493 page_idx = f2fs_get_next_page_offset(&dn,
3501 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3502 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3503 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3505 ret = release_compress_blocks(&dn, count);
3507 f2fs_put_dnode(&dn);
3513 released_blocks += ret;
3516 filemap_invalidate_unlock(inode->i_mapping);
3517 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3519 inode_unlock(inode);
3521 mnt_drop_write_file(filp);
3524 ret = put_user(released_blocks, (u64 __user *)arg);
3525 } else if (released_blocks &&
3526 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3527 set_sbi_flag(sbi, SBI_NEED_FSCK);
3528 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3529 "iblocks=%llu, released=%u, compr_blocks=%u, "
3531 __func__, inode->i_ino, inode->i_blocks,
3533 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3539 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3541 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3542 unsigned int reserved_blocks = 0;
3543 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3547 for (i = 0; i < count; i++) {
3548 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3549 dn->ofs_in_node + i);
3551 if (!__is_valid_data_blkaddr(blkaddr))
3553 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3554 DATA_GENERIC_ENHANCE))) {
3555 f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
3556 return -EFSCORRUPTED;
3561 int compr_blocks = 0;
3565 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3566 blkaddr = f2fs_data_blkaddr(dn);
3569 if (blkaddr == COMPRESS_ADDR)
3571 dn->ofs_in_node += cluster_size;
3575 if (__is_valid_data_blkaddr(blkaddr)) {
3580 dn->data_blkaddr = NEW_ADDR;
3581 f2fs_set_data_blkaddr(dn);
3584 reserved = cluster_size - compr_blocks;
3585 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3589 if (reserved != cluster_size - compr_blocks)
3592 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3594 reserved_blocks += reserved;
3596 count -= cluster_size;
3599 return reserved_blocks;
3602 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3604 struct inode *inode = file_inode(filp);
3605 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3606 pgoff_t page_idx = 0, last_idx;
3607 unsigned int reserved_blocks = 0;
3610 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3613 if (!f2fs_compressed_file(inode))
3616 if (f2fs_readonly(sbi->sb))
3619 ret = mnt_want_write_file(filp);
3623 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3626 f2fs_balance_fs(F2FS_I_SB(inode), true);
3630 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3635 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3636 filemap_invalidate_lock(inode->i_mapping);
3638 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3640 while (page_idx < last_idx) {
3641 struct dnode_of_data dn;
3642 pgoff_t end_offset, count;
3644 set_new_dnode(&dn, inode, NULL, NULL, 0);
3645 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3647 if (ret == -ENOENT) {
3648 page_idx = f2fs_get_next_page_offset(&dn,
3656 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3657 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3658 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3660 ret = reserve_compress_blocks(&dn, count);
3662 f2fs_put_dnode(&dn);
3668 reserved_blocks += ret;
3671 filemap_invalidate_unlock(inode->i_mapping);
3672 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3675 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3676 inode->i_ctime = current_time(inode);
3677 f2fs_mark_inode_dirty_sync(inode, true);
3680 inode_unlock(inode);
3682 mnt_drop_write_file(filp);
3685 ret = put_user(reserved_blocks, (u64 __user *)arg);
3686 } else if (reserved_blocks &&
3687 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3688 set_sbi_flag(sbi, SBI_NEED_FSCK);
3689 f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3690 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3691 "run fsck to fix.",
3692 __func__, inode->i_ino, inode->i_blocks,
3693 reserved_blocks,
3694 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3700 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3701 pgoff_t off, block_t block, block_t len, u32 flags)
3703 sector_t sector = SECTOR_FROM_BLOCK(block);
3704 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3707 if (flags & F2FS_TRIM_FILE_DISCARD) {
3708 if (bdev_max_secure_erase_sectors(bdev))
3709 ret = blkdev_issue_secure_erase(bdev, sector, nr_sects,
3712 ret = blkdev_issue_discard(bdev, sector, nr_sects,
3716 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3717 if (IS_ENCRYPTED(inode))
3718 ret = fscrypt_zeroout_range(inode, off, block, len);
3720 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3727 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3729 struct inode *inode = file_inode(filp);
3730 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3731 struct address_space *mapping = inode->i_mapping;
3732 struct block_device *prev_bdev = NULL;
3733 struct f2fs_sectrim_range range;
3734 pgoff_t index, pg_end, prev_index = 0;
3735 block_t prev_block = 0, len = 0;
3737 bool to_end = false;
3740 if (!(filp->f_mode & FMODE_WRITE))
3743 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3747 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3748 !S_ISREG(inode->i_mode))
3751 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3752 !f2fs_hw_support_discard(sbi)) ||
3753 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3754 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3757 file_start_write(filp);
3760 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3761 range.start >= inode->i_size) {
3769 if (inode->i_size - range.start > range.len) {
3770 end_addr = range.start + range.len;
3772 end_addr = range.len == (u64)-1 ?
3773 sbi->sb->s_maxbytes : inode->i_size;
3777 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3778 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3783 index = F2FS_BYTES_TO_BLK(range.start);
3784 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3786 ret = f2fs_convert_inline_inode(inode);
3790 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3791 filemap_invalidate_lock(mapping);
3793 ret = filemap_write_and_wait_range(mapping, range.start,
3794 to_end ? LLONG_MAX : end_addr - 1);
3798 truncate_inode_pages_range(mapping, range.start,
3799 to_end ? -1 : end_addr - 1);
3801 while (index < pg_end) {
3802 struct dnode_of_data dn;
3803 pgoff_t end_offset, count;
3806 set_new_dnode(&dn, inode, NULL, NULL, 0);
3807 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3809 if (ret == -ENOENT) {
3810 index = f2fs_get_next_page_offset(&dn, index);
3816 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3817 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3818 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3819 struct block_device *cur_bdev;
3820 block_t blkaddr = f2fs_data_blkaddr(&dn);
3822 if (!__is_valid_data_blkaddr(blkaddr))
3825 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3826 DATA_GENERIC_ENHANCE)) {
3827 ret = -EFSCORRUPTED;
3828 f2fs_put_dnode(&dn);
3829 f2fs_handle_error(sbi,
3830 ERROR_INVALID_BLKADDR);
3834 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3835 if (f2fs_is_multi_device(sbi)) {
3836 int di = f2fs_target_device_index(sbi, blkaddr);
3838 blkaddr -= FDEV(di).start_blk;
3842 if (prev_bdev == cur_bdev &&
3843 index == prev_index + len &&
3844 blkaddr == prev_block + len) {
3847 ret = f2fs_secure_erase(prev_bdev,
3848 inode, prev_index, prev_block,
3851 f2fs_put_dnode(&dn);
3860 prev_bdev = cur_bdev;
3862 prev_block = blkaddr;
3867 f2fs_put_dnode(&dn);
3869 if (fatal_signal_pending(current)) {
3877 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3878 prev_block, len, range.flags);
3880 filemap_invalidate_unlock(mapping);
3881 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3883 inode_unlock(inode);
3884 file_end_write(filp);
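/*
 * Editor's illustrative sketch: start/len are byte quantities and must be
 * block-aligned unless len runs to EOF (len == -1); flags selects discard
 * (secure erase when the device supports it), zeroout, or both.
 *
 *	struct f2fs_sectrim_range tr = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &tr);
 */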
3889 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
3891 struct inode *inode = file_inode(filp);
3892 struct f2fs_comp_option option;
3894 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3897 inode_lock_shared(inode);
3899 if (!f2fs_compressed_file(inode)) {
3900 inode_unlock_shared(inode);
3904 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3905 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3907 inode_unlock_shared(inode);
3909 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3916 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
3918 struct inode *inode = file_inode(filp);
3919 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3920 struct f2fs_comp_option option;
3923 if (!f2fs_sb_has_compression(sbi))
3926 if (!(filp->f_mode & FMODE_WRITE))
3929 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
3933 if (!f2fs_compressed_file(inode) ||
3934 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
3935 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
3936 option.algorithm >= COMPRESS_MAX)
3939 file_start_write(filp);
3942 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
3947 if (inode->i_size != 0) {
3952 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
3953 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
3954 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
3955 f2fs_mark_inode_dirty_sync(inode, true);
3957 if (!f2fs_is_compress_backend_ready(inode))
3958 f2fs_warn(sbi, "compression algorithm is successfully set, "
3959 "but current kernel doesn't support this algorithm.");
3961 inode_unlock(inode);
3962 file_end_write(filp);
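/*
 * Editor's illustrative sketch: per-file compression parameters can only
 * be changed while the file is empty and not mmapped, per the checks
 * above. The algorithm value is the kernel's compression index (in this
 * tree 0 = lzo, 1 = lz4, 2 = zstd, 3 = lzo-rle, subject to kernel config).
 *
 *	struct f2fs_comp_option opt = {
 *		.algorithm = 1,			(lz4, assumed index)
 *		.log_cluster_size = 2,		(4-block clusters)
 *	};
 *	ioctl(fd, F2FS_IOC_SET_COMPRESS_OPTION, &opt);
 */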
3967 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
3969 DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx);
3970 struct address_space *mapping = inode->i_mapping;
3972 pgoff_t redirty_idx = page_idx;
3973 int i, page_len = 0, ret = 0;
3975 page_cache_ra_unbounded(&ractl, len, 0);
3977 for (i = 0; i < len; i++, page_idx++) {
3978 page = read_cache_page(mapping, page_idx, NULL, NULL);
3980 ret = PTR_ERR(page);
3986 for (i = 0; i < page_len; i++, redirty_idx++) {
3987 page = find_lock_page(mapping, redirty_idx);
3989 /* It should never fail, since the page was pinned above */
3990 f2fs_bug_on(F2FS_I_SB(inode), !page);
3992 set_page_dirty(page);
3993 f2fs_put_page(page, 1);
3994 f2fs_put_page(page, 0);
4000 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4002 struct inode *inode = file_inode(filp);
4003 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4004 struct f2fs_inode_info *fi = F2FS_I(inode);
4005 pgoff_t page_idx = 0, last_idx;
4006 unsigned int blk_per_seg = sbi->blocks_per_seg;
4007 int cluster_size = fi->i_cluster_size;
4010 if (!f2fs_sb_has_compression(sbi) ||
4011 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4014 if (!(filp->f_mode & FMODE_WRITE))
4017 if (!f2fs_compressed_file(inode))
4020 f2fs_balance_fs(F2FS_I_SB(inode), true);
4022 file_start_write(filp);
4025 if (!f2fs_is_compress_backend_ready(inode)) {
4030 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4035 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4039 if (!atomic_read(&fi->i_compr_blocks))
4042 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4044 count = last_idx - page_idx;
4046 int len = min(cluster_size, count);
4048 ret = redirty_blocks(inode, page_idx, len);
4052 if (get_dirty_pages(inode) >= blk_per_seg)
4053 filemap_fdatawrite(inode->i_mapping);
4060 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4064 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4067 inode_unlock(inode);
4068 file_end_write(filp);
4073 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4075 struct inode *inode = file_inode(filp);
4076 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4077 pgoff_t page_idx = 0, last_idx;
4078 unsigned int blk_per_seg = sbi->blocks_per_seg;
4079 int cluster_size = F2FS_I(inode)->i_cluster_size;
4082 if (!f2fs_sb_has_compression(sbi) ||
4083 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4086 if (!(filp->f_mode & FMODE_WRITE))
4089 if (!f2fs_compressed_file(inode))
4092 f2fs_balance_fs(F2FS_I_SB(inode), true);
4094 file_start_write(filp);
4097 if (!f2fs_is_compress_backend_ready(inode)) {
4102 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
4107 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4111 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4113 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4115 count = last_idx - page_idx;
4117 int len = min(cluster_size, count);
4119 ret = redirty_blocks(inode, page_idx, len);
4123 if (get_dirty_pages(inode) >= blk_per_seg)
4124 filemap_fdatawrite(inode->i_mapping);
4131 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4134 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4137 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4140 inode_unlock(inode);
4141 file_end_write(filp);
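/*
 * Editor's illustrative sketch: with the filesystem mounted with
 * compress_mode=user, the kernel leaves clusters alone until userspace
 * asks for them to be rewritten, cluster by cluster, via redirty_blocks():
 *
 *	ioctl(fd, F2FS_IOC_COMPRESS_FILE);
 *	ioctl(fd, F2FS_IOC_DECOMPRESS_FILE);
 */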
4146 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4149 case FS_IOC_GETVERSION:
4150 return f2fs_ioc_getversion(filp, arg);
4151 case F2FS_IOC_START_ATOMIC_WRITE:
4152 return f2fs_ioc_start_atomic_write(filp, false);
4153 case F2FS_IOC_START_ATOMIC_REPLACE:
4154 return f2fs_ioc_start_atomic_write(filp, true);
4155 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4156 return f2fs_ioc_commit_atomic_write(filp);
4157 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4158 return f2fs_ioc_abort_atomic_write(filp);
4159 case F2FS_IOC_START_VOLATILE_WRITE:
4160 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4162 case F2FS_IOC_SHUTDOWN:
4163 return f2fs_ioc_shutdown(filp, arg);
4165 return f2fs_ioc_fitrim(filp, arg);
4166 case FS_IOC_SET_ENCRYPTION_POLICY:
4167 return f2fs_ioc_set_encryption_policy(filp, arg);
4168 case FS_IOC_GET_ENCRYPTION_POLICY:
4169 return f2fs_ioc_get_encryption_policy(filp, arg);
4170 case FS_IOC_GET_ENCRYPTION_PWSALT:
4171 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4172 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4173 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4174 case FS_IOC_ADD_ENCRYPTION_KEY:
4175 return f2fs_ioc_add_encryption_key(filp, arg);
4176 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4177 return f2fs_ioc_remove_encryption_key(filp, arg);
4178 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4179 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4180 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4181 return f2fs_ioc_get_encryption_key_status(filp, arg);
4182 case FS_IOC_GET_ENCRYPTION_NONCE:
4183 return f2fs_ioc_get_encryption_nonce(filp, arg);
4184 case F2FS_IOC_GARBAGE_COLLECT:
4185 return f2fs_ioc_gc(filp, arg);
4186 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4187 return f2fs_ioc_gc_range(filp, arg);
4188 case F2FS_IOC_WRITE_CHECKPOINT:
4189 return f2fs_ioc_write_checkpoint(filp, arg);
4190 case F2FS_IOC_DEFRAGMENT:
4191 return f2fs_ioc_defragment(filp, arg);
4192 case F2FS_IOC_MOVE_RANGE:
4193 return f2fs_ioc_move_range(filp, arg);
4194 case F2FS_IOC_FLUSH_DEVICE:
4195 return f2fs_ioc_flush_device(filp, arg);
4196 case F2FS_IOC_GET_FEATURES:
4197 return f2fs_ioc_get_features(filp, arg);
4198 case F2FS_IOC_GET_PIN_FILE:
4199 return f2fs_ioc_get_pin_file(filp, arg);
4200 case F2FS_IOC_SET_PIN_FILE:
4201 return f2fs_ioc_set_pin_file(filp, arg);
4202 case F2FS_IOC_PRECACHE_EXTENTS:
4203 return f2fs_ioc_precache_extents(filp, arg);
4204 case F2FS_IOC_RESIZE_FS:
4205 return f2fs_ioc_resize_fs(filp, arg);
4206 case FS_IOC_ENABLE_VERITY:
4207 return f2fs_ioc_enable_verity(filp, arg);
4208 case FS_IOC_MEASURE_VERITY:
4209 return f2fs_ioc_measure_verity(filp, arg);
4210 case FS_IOC_READ_VERITY_METADATA:
4211 return f2fs_ioc_read_verity_metadata(filp, arg);
4212 case FS_IOC_GETFSLABEL:
4213 return f2fs_ioc_getfslabel(filp, arg);
4214 case FS_IOC_SETFSLABEL:
4215 return f2fs_ioc_setfslabel(filp, arg);
4216 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4217 return f2fs_get_compress_blocks(filp, arg);
4218 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4219 return f2fs_release_compress_blocks(filp, arg);
4220 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4221 return f2fs_reserve_compress_blocks(filp, arg);
4222 case F2FS_IOC_SEC_TRIM_FILE:
4223 return f2fs_sec_trim_file(filp, arg);
4224 case F2FS_IOC_GET_COMPRESS_OPTION:
4225 return f2fs_ioc_get_compress_option(filp, arg);
4226 case F2FS_IOC_SET_COMPRESS_OPTION:
4227 return f2fs_ioc_set_compress_option(filp, arg);
4228 case F2FS_IOC_DECOMPRESS_FILE:
4229 return f2fs_ioc_decompress_file(filp, arg);
4230 case F2FS_IOC_COMPRESS_FILE:
4231 return f2fs_ioc_compress_file(filp, arg);
4237 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4239 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4241 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4244 return __f2fs_ioctl(filp, cmd, arg);
4248 * Return %true if the given read or write request should use direct I/O, or
4249 * %false if it should use buffered I/O.
4251 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4252 struct iov_iter *iter)
4256 if (!(iocb->ki_flags & IOCB_DIRECT))
4259 if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
4263 * Direct I/O not aligned to the disk's logical_block_size will be
4264 * attempted, but will fail with -EINVAL.
4266 * f2fs additionally requires that direct I/O be aligned to the
4267 * filesystem block size, which is often a stricter requirement.
4268 * However, f2fs traditionally falls back to buffered I/O on requests
4269 * that are logical_block_size-aligned but not fs-block aligned.
4271 * The below logic implements this behavior.
4273 align = iocb->ki_pos | iov_iter_alignment(iter);
4274 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4275 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4276 return false;
4278 return true;
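/*
 * Worked example (editor's addition): with a 4KiB fs block and a 512-byte
 * logical block, a 512-byte write at pos 512 from a 512-byte-aligned
 * buffer yields an align value where
 *
 *	IS_ALIGNED(align, i_blocksize(inode))  == false  (4096)
 *	IS_ALIGNED(align, logical_block_size)  == true   (512)
 *
 * so the request quietly falls back to buffered I/O instead of letting
 * iomap fail it with -EINVAL.
 */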
4281 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4284 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4286 dec_page_count(sbi, F2FS_DIO_READ);
4289 f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size);
4293 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4294 .end_io = f2fs_dio_read_end_io,
4297 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4299 struct file *file = iocb->ki_filp;
4300 struct inode *inode = file_inode(file);
4301 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4302 struct f2fs_inode_info *fi = F2FS_I(inode);
4303 const loff_t pos = iocb->ki_pos;
4304 const size_t count = iov_iter_count(to);
4305 struct iomap_dio *dio;
4309 return 0; /* skip atime update */
4311 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4313 if (iocb->ki_flags & IOCB_NOWAIT) {
4314 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4319 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4323 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4324 * the higher-level function iomap_dio_rw() in order to ensure that the
4325 * F2FS_DIO_READ counter will be decremented correctly in all cases.
4327 inc_page_count(sbi, F2FS_DIO_READ);
4328 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4329 &f2fs_iomap_dio_read_ops, 0, NULL, 0);
4330 if (IS_ERR_OR_NULL(dio)) {
4331 ret = PTR_ERR_OR_ZERO(dio);
4332 if (ret != -EIOCBQUEUED)
4333 dec_page_count(sbi, F2FS_DIO_READ);
4335 ret = iomap_dio_complete(dio);
4338 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4340 file_accessed(file);
4342 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4346 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4348 struct inode *inode = file_inode(iocb->ki_filp);
4349 const loff_t pos = iocb->ki_pos;
4352 if (!f2fs_is_compress_backend_ready(inode))
4355 if (trace_f2fs_dataread_start_enabled()) {
4356 char *p = f2fs_kmalloc(F2FS_I_SB(inode), PATH_MAX, GFP_KERNEL);
4360 goto skip_read_trace;
4362 path = dentry_path_raw(file_dentry(iocb->ki_filp), p, PATH_MAX);
4365 goto skip_read_trace;
4368 trace_f2fs_dataread_start(inode, pos, iov_iter_count(to),
4369 current->pid, path, current->comm);
4373 if (f2fs_should_use_dio(inode, iocb, to)) {
4374 ret = f2fs_dio_read_iter(iocb, to);
4376 ret = filemap_read(iocb, to, 0);
4378 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4379 APP_BUFFERED_READ_IO, ret);
4381 if (trace_f2fs_dataread_end_enabled())
4382 trace_f2fs_dataread_end(inode, pos, ret);
4386 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4388 struct file *file = iocb->ki_filp;
4389 struct inode *inode = file_inode(file);
4393 if (IS_IMMUTABLE(inode))
4396 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4399 count = generic_write_checks(iocb, from);
4403 err = file_modified(file);
4410 * Preallocate blocks for a write request, if it is possible and helpful to do
4411 * so. Returns a positive number if blocks may have been preallocated, 0 if no
4412 * blocks were preallocated, or a negative errno value if something went
4413 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4414 * requested blocks (not just some of them) have been allocated.
4416 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4419 struct inode *inode = file_inode(iocb->ki_filp);
4420 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4421 const loff_t pos = iocb->ki_pos;
4422 const size_t count = iov_iter_count(iter);
4423 struct f2fs_map_blocks map = {};
4427 /* If it will be an out-of-place direct write, don't bother. */
4428 if (dio && f2fs_lfs_mode(sbi))
4431 * Don't preallocate holes aligned to DIO_SKIP_HOLES; DIO falls back to
4432 * buffered IO if it meets any hole.
4434 if (dio && i_size_read(inode) &&
4435 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4438 /* No-wait I/O can't allocate blocks. */
4439 if (iocb->ki_flags & IOCB_NOWAIT)
4442 /* If it will be a short write, don't bother. */
4443 if (fault_in_iov_iter_readable(iter, count))
4446 if (f2fs_has_inline_data(inode)) {
4447 /* If the data will fit inline, don't bother. */
4448 if (pos + count <= MAX_INLINE_DATA(inode))
4450 ret = f2fs_convert_inline_inode(inode);
4455 /* Do not preallocate blocks that will be written partially in 4KB. */
4456 map.m_lblk = F2FS_BLK_ALIGN(pos);
4457 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4458 if (map.m_len > map.m_lblk)
4459 map.m_len -= map.m_lblk;
4462 map.m_may_create = true;
4464 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4465 flag = F2FS_GET_BLOCK_PRE_DIO;
4467 map.m_seg_type = NO_CHECK_TYPE;
4468 flag = F2FS_GET_BLOCK_PRE_AIO;
4471 ret = f2fs_map_blocks(inode, &map, 1, flag);
4472 /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
4473 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4476 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4480 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4481 struct iov_iter *from)
4483 struct file *file = iocb->ki_filp;
4484 struct inode *inode = file_inode(file);
4487 if (iocb->ki_flags & IOCB_NOWAIT)
4490 current->backing_dev_info = inode_to_bdi(inode);
4491 ret = generic_perform_write(iocb, from);
4492 current->backing_dev_info = NULL;
4495 iocb->ki_pos += ret;
4496 f2fs_update_iostat(F2FS_I_SB(inode), inode,
4497 APP_BUFFERED_IO, ret);
4502 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4505 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4507 dec_page_count(sbi, F2FS_DIO_WRITE);
4510 f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
4514 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4515 .end_io = f2fs_dio_write_end_io,
4518 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4519 bool *may_need_sync)
4521 struct file *file = iocb->ki_filp;
4522 struct inode *inode = file_inode(file);
4523 struct f2fs_inode_info *fi = F2FS_I(inode);
4524 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4525 const bool do_opu = f2fs_lfs_mode(sbi);
4526 const loff_t pos = iocb->ki_pos;
4527 const ssize_t count = iov_iter_count(from);
4528 unsigned int dio_flags;
4529 struct iomap_dio *dio;
4532 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4534 if (iocb->ki_flags & IOCB_NOWAIT) {
4535 /* f2fs_convert_inline_inode() and block allocation can block */
4536 if (f2fs_has_inline_data(inode) ||
4537 !f2fs_overwrite_io(inode, pos, count)) {
4542 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4546 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4547 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4552 ret = f2fs_convert_inline_inode(inode);
4556 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4558 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4562 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4563 * the higher-level function iomap_dio_rw() in order to ensure that the
4564 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
4566 inc_page_count(sbi, F2FS_DIO_WRITE);
4568 if (pos + count > inode->i_size)
4569 dio_flags |= IOMAP_DIO_FORCE_WAIT;
4570 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4571 &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0);
4572 if (IS_ERR_OR_NULL(dio)) {
4573 ret = PTR_ERR_OR_ZERO(dio);
4574 if (ret == -ENOTBLK)
4576 if (ret != -EIOCBQUEUED)
4577 dec_page_count(sbi, F2FS_DIO_WRITE);
4579 ret = iomap_dio_complete(dio);
4583 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4584 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4588 if (pos + ret > inode->i_size)
4589 f2fs_i_size_write(inode, pos + ret);
4591 set_inode_flag(inode, FI_UPDATE_WRITE);
4593 if (iov_iter_count(from)) {
4595 loff_t bufio_start_pos = iocb->ki_pos;
4598 * The direct write was partial, so we need to fall back to a
4599 * buffered write for the remainder.
4602 ret2 = f2fs_buffered_write_iter(iocb, from);
4603 if (iov_iter_count(from))
4604 f2fs_write_failed(inode, iocb->ki_pos);
4609 * Ensure that the pagecache pages are written to disk and
4610 * invalidated to preserve the expected O_DIRECT semantics.
4613 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4617 ret2 = filemap_write_and_wait_range(file->f_mapping,
4622 invalidate_mapping_pages(file->f_mapping,
4623 bufio_start_pos >> PAGE_SHIFT,
4624 bufio_end_pos >> PAGE_SHIFT);
4627 /* iomap_dio_rw() already handled the generic_write_sync(). */
4628 *may_need_sync = false;
4631 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4635 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4637 struct inode *inode = file_inode(iocb->ki_filp);
4638 const loff_t orig_pos = iocb->ki_pos;
4639 const size_t orig_count = iov_iter_count(from);
4642 bool may_need_sync = true;
4646 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4651 if (!f2fs_is_compress_backend_ready(inode)) {
4656 if (iocb->ki_flags & IOCB_NOWAIT) {
4657 if (!inode_trylock(inode)) {
4665 ret = f2fs_write_checks(iocb, from);
4669 /* Determine whether we will do a direct write or a buffered write. */
4670 dio = f2fs_should_use_dio(inode, iocb, from);
4672 /* Possibly preallocate the blocks for the write. */
4673 target_size = iocb->ki_pos + iov_iter_count(from);
4674 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4675 if (preallocated < 0) {
4678 if (trace_f2fs_datawrite_start_enabled()) {
4679 char *p = f2fs_kmalloc(F2FS_I_SB(inode),
4680 PATH_MAX, GFP_KERNEL);
4684 goto skip_write_trace;
4685 path = dentry_path_raw(file_dentry(iocb->ki_filp),
4689 goto skip_write_trace;
4691 trace_f2fs_datawrite_start(inode, orig_pos, orig_count,
4692 current->pid, path, current->comm);
4696 /* Do the actual write. */
4698 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4699 f2fs_buffered_write_iter(iocb, from);
4701 if (trace_f2fs_datawrite_end_enabled())
4702 trace_f2fs_datawrite_end(inode, orig_pos, ret);
4705 /* Don't leave any preallocated blocks around past i_size. */
4706 if (preallocated && i_size_read(inode) < target_size) {
4707 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4708 filemap_invalidate_lock(inode->i_mapping);
4709 if (!f2fs_truncate(inode))
4710 file_dont_truncate(inode);
4711 filemap_invalidate_unlock(inode->i_mapping);
4712 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4714 file_dont_truncate(inode);
4717 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4719 inode_unlock(inode);
4721 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4722 if (ret > 0 && may_need_sync)
4723 ret = generic_write_sync(iocb, ret);
4727 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4730 struct address_space *mapping;
4731 struct backing_dev_info *bdi;
4732 struct inode *inode = file_inode(filp);
4735 if (advice == POSIX_FADV_SEQUENTIAL) {
4736 if (S_ISFIFO(inode->i_mode))
4739 mapping = filp->f_mapping;
4740 if (!mapping || len < 0)
4743 bdi = inode_to_bdi(mapping->host);
4744 filp->f_ra.ra_pages = bdi->ra_pages *
4745 F2FS_I_SB(inode)->seq_file_ra_mul;
4746 spin_lock(&filp->f_lock);
4747 filp->f_mode &= ~FMODE_RANDOM;
4748 spin_unlock(&filp->f_lock);
4752 err = generic_fadvise(filp, offset, len, advice);
4753 if (!err && advice == POSIX_FADV_DONTNEED &&
4754 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4755 f2fs_compressed_file(inode))
4756 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4761 #ifdef CONFIG_COMPAT
4762 struct compat_f2fs_gc_range {
4763 u32 sync;
4764 compat_u64 start;
4765 compat_u64 len;
4766 };
4767 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4768 struct compat_f2fs_gc_range)
4770 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4772 struct compat_f2fs_gc_range __user *urange;
4773 struct f2fs_gc_range range;
4776 urange = compat_ptr(arg);
4777 err = get_user(range.sync, &urange->sync);
4778 err |= get_user(range.start, &urange->start);
4779 err |= get_user(range.len, &urange->len);
4783 return __f2fs_ioc_gc_range(file, &range);
4786 struct compat_f2fs_move_range {
4787 u32 dst_fd;
4788 compat_u64 pos_in;
4789 compat_u64 pos_out;
4790 compat_u64 len;
4791 };
4792 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4793 struct compat_f2fs_move_range)
4795 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4797 struct compat_f2fs_move_range __user *urange;
4798 struct f2fs_move_range range;
4801 urange = compat_ptr(arg);
4802 err = get_user(range.dst_fd, &urange->dst_fd);
4803 err |= get_user(range.pos_in, &urange->pos_in);
4804 err |= get_user(range.pos_out, &urange->pos_out);
4805 err |= get_user(range.len, &urange->len);
4809 return __f2fs_ioc_move_range(file, &range);
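/*
 * Editor's note: on 32-bit ABIs such as x86, __u64 is only 4-byte aligned,
 * so the compat structs above are laid out differently from their native
 * counterparts (e.g. 28 vs. 32 bytes for the move-range argument). Since
 * _IOW()/_IOWR() encode sizeof() into the command number, 32-bit userspace
 * issues the F2FS_IOC32_* numbers and the kernel re-reads each field with
 * get_user() as above before calling the common helper.
 */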
4812 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4814 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4816 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4820 case FS_IOC32_GETVERSION:
4821 cmd = FS_IOC_GETVERSION;
4823 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4824 return f2fs_compat_ioc_gc_range(file, arg);
4825 case F2FS_IOC32_MOVE_RANGE:
4826 return f2fs_compat_ioc_move_range(file, arg);
4827 case F2FS_IOC_START_ATOMIC_WRITE:
4828 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4829 case F2FS_IOC_START_VOLATILE_WRITE:
4830 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4831 case F2FS_IOC_ABORT_ATOMIC_WRITE:
4832 case F2FS_IOC_SHUTDOWN:
4834 case FS_IOC_SET_ENCRYPTION_POLICY:
4835 case FS_IOC_GET_ENCRYPTION_PWSALT:
4836 case FS_IOC_GET_ENCRYPTION_POLICY:
4837 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4838 case FS_IOC_ADD_ENCRYPTION_KEY:
4839 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4840 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4841 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4842 case FS_IOC_GET_ENCRYPTION_NONCE:
4843 case F2FS_IOC_GARBAGE_COLLECT:
4844 case F2FS_IOC_WRITE_CHECKPOINT:
4845 case F2FS_IOC_DEFRAGMENT:
4846 case F2FS_IOC_FLUSH_DEVICE:
4847 case F2FS_IOC_GET_FEATURES:
4848 case F2FS_IOC_GET_PIN_FILE:
4849 case F2FS_IOC_SET_PIN_FILE:
4850 case F2FS_IOC_PRECACHE_EXTENTS:
4851 case F2FS_IOC_RESIZE_FS:
4852 case FS_IOC_ENABLE_VERITY:
4853 case FS_IOC_MEASURE_VERITY:
4854 case FS_IOC_READ_VERITY_METADATA:
4855 case FS_IOC_GETFSLABEL:
4856 case FS_IOC_SETFSLABEL:
4857 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4858 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4859 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4860 case F2FS_IOC_SEC_TRIM_FILE:
4861 case F2FS_IOC_GET_COMPRESS_OPTION:
4862 case F2FS_IOC_SET_COMPRESS_OPTION:
4863 case F2FS_IOC_DECOMPRESS_FILE:
4864 case F2FS_IOC_COMPRESS_FILE:
4867 return -ENOIOCTLCMD;
4869 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4873 const struct file_operations f2fs_file_operations = {
4874 .llseek = f2fs_llseek,
4875 .read_iter = f2fs_file_read_iter,
4876 .write_iter = f2fs_file_write_iter,
4877 .open = f2fs_file_open,
4878 .release = f2fs_release_file,
4879 .mmap = f2fs_file_mmap,
4880 .flush = f2fs_file_flush,
4881 .fsync = f2fs_sync_file,
4882 .fallocate = f2fs_fallocate,
4883 .unlocked_ioctl = f2fs_ioctl,
4884 #ifdef CONFIG_COMPAT
4885 .compat_ioctl = f2fs_compat_ioctl,
4887 .splice_read = generic_file_splice_read,
4888 .splice_write = iter_file_splice_write,
4889 .fadvise = f2fs_file_fadvise,