/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
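
/*
 * Page-fault write path: allocate the backing block for the faulted page,
 * then zero any part of the page beyond EOF and mark it dirty/uptodate.
 */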
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
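
/*
 * Decide whether an fsync must be promoted to a full checkpoint; any
 * condition below that roll-forward recovery cannot handle forces one.
 */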
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* but we need to avoid losing pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
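
/*
 * Core fsync/fdatasync path: flush dirty data, then either write a full
 * checkpoint or just this inode's node pages plus recovery info, and
 * finally issue a cache flush to the device.
 */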
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
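
/*
 * Walk direct node blocks from @offset, testing each block address with
 * __found_offset() to locate the next data or hole for SEEK_DATA/SEEK_HOLE.
 */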
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;
			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!f2fs_encrypted_inode(inode))
			return -ENOKEY;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);
	struct dentry *dir;

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return ret;
}
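
/*
 * Invalidate up to @count block addresses in the current dnode, starting
 * at dn->ofs_in_node; returns the number of blocks actually freed.
 */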
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) ||
					!S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
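
/*
 * Truncate all data blocks at or beyond @from: drop inline data or the
 * tail of the current dnode, free whole node blocks past free_from, and
 * zero out the partial last page.
 */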
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(ipage, from);
		if (from == 0)
			clear_inode_flag(inode, FI_DATA_EXIST);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
					u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(inode, stat);
	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				fscrypt_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
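
/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial first/last pages in place and
 * drop every whole block between them.
 */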
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
							blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.node_page,
								dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
								1, false);
					f2fs_i_blocks_write(dst_inode,
								1, true);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
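
/*
 * Move a range of blocks between two inodes in chunks of up to four
 * dnodes' worth of addresses, rolling back on failure. do_replace[]
 * marks blocks whose addresses are migrated directly instead of copied.
 */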
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
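
/*
 * FALLOC_FL_ZERO_RANGE: zero the unaligned head/tail through the page
 * cache and convert the aligned middle to preallocated (NEW_ADDR)
 * blocks, one dnode at a time.
 */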
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
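
/*
 * FALLOC_FL_INSERT_RANGE: shift blocks from i_size down to @offset toward
 * the end of file, ADDRS_PER_BLOCK at a time, to open an unallocated gap.
 */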
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		nr = min(nr, (pgoff_t)ADDRS_PER_BLOCK);
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
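
/*
 * Default fallocate: preallocate blocks for the range in one mapping call
 * and, unless KEEP_SIZE, extend i_size (or trim it to the failure point).
 */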
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close. So we should
	 * not drop any inmemory pages by a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}
#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	unsigned int oldflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	inode_lock(inode);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			inode_unlock(inode);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	inode_unlock(inode);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		goto out;
	}

inc_stat:
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
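
/*
 * Redirty every mapped page in the range so writeback reallocates the
 * blocks contiguously; skipped when the range is already contiguous or
 * when in-place update would defeat the relocation.
 */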
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are being
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range))) {
		err = -EFAULT;
		goto out;
	}

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) ||
		range.len & (F2FS_BLKSIZE - 1)) {
		err = -EINVAL;
		goto out;
	}

	err = f2fs_defragment_range(sbi, filp, &range);
	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		goto out;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		err = -EFAULT;
out:
	mnt_drop_write_file(filp);
	return err;
}
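
/*
 * F2FS_IOC_MOVE_RANGE backend: validate alignment and overlap, flush both
 * files, then exchange the block addresses under f2fs_lock_op().
 */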
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);

	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);
	mnt_drop_write_file(filp);

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	default:
		return -ENOTTY;
	}
}
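
/*
 * Write entry point: preallocate the destination blocks up front (unless
 * the user buffer might fault) so the write path itself does not need to
 * allocate, then run the generic write under a block plug.
 */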
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	if (f2fs_encrypted_inode(inode) &&
				!fscrypt_has_encryption_key(inode) &&
				fscrypt_get_encryption_info(inode))
		return -EACCES;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};