// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn = { .node_changed = false };
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	f2fs_put_dnode(&dn);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	f2fs_balance_fs(sbi, dn.node_changed);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}

static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered after
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's an atomic_write, it's fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * a node chain which serializes node blocks. If one of the node writes
	 * is reordered, we can simply see a broken chain, which stops
	 * roll-forward recovery. It means we'll recover all or none of the
	 * node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
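
/*
 * Illustrative userspace sketch (not part of this file): fdatasync() lands
 * in the path above with datasync=1, which sets FI_NEED_IPU to favor
 * in-place updates, so only data plus a minimal node chain is written and
 * no checkpoint runs unless need_do_checkpoint() finds a reason.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/f2fs/db.sqlite", O_RDWR);
 *	pwrite(fd, buf, len, off);
 *	fdatasync(fd);
 */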

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			is_valid_data_blkaddr(sbi, blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
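
/*
 * Illustrative userspace sketch (not part of this file): SEEK_DATA and
 * SEEK_HOLE dispatch to f2fs_seek_block() above, which walks dnode blocks
 * looking for valid or NULL block addresses.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);      first non-hole offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   end of that data extent
 */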

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
			continue;

		f2fs_invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
							bool buf_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;
	int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		__do_map_lock(sbi, flag, true);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		__do_map_lock(sbi, flag, false);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}

int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & F2FS_FL_USER_VISIBLE;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		bool to_smaller = (attr->ia_size <= i_size_read(inode));

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (to_smaller)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		if (err)
			return err;

		if (!to_smaller) {
			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}

static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}

static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}

static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true, false);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}

static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}

static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
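
/*
 * Illustrative userspace sketch (not part of this file): each branch above
 * is reachable through a fallocate(2) mode flag. PUNCH_HOLE must be
 * combined with KEEP_SIZE per the fallocate(2) contract.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	fallocate(fd, 0, 0, 1 << 20);                       expand_inode_data
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 4096, 8192);     punch_hole
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096);   f2fs_collapse_range
 */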

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any inmemory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all writers close the file. Since this should be done
	 * before dropping the file lock, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags;

	if (IS_ENCRYPTED(inode))
		flags |= F2FS_ENCRYPT_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		flags |= F2FS_INLINE_DATA_FL;

	flags &= F2FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & F2FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}

static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	if (!get_dirty_pages(inode))
		goto skip_flush;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}
skip_flush:
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0;
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
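
/*
 * Illustrative userspace sketch (not part of this file): the start/commit
 * pair above is what Android's SQLite uses for atomic transactions. The
 * ioctl numbers below are copied from the f2fs ioctl definitions
 * (magic 0xf5); treat them as assumptions if your headers differ.
 *
 *	#include <sys/ioctl.h>
 *	#define F2FS_IOCTL_MAGIC		0xf5
 *	#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
 *	#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, page, 4096, off);             staged as inmem pages
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE); all-or-nothing commit
 */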

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);
	return ret;
}
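
/*
 * Illustrative userspace sketch (not part of this file): F2FS_IOC_SHUTDOWN
 * reuses XFS's 'X'/125 shutdown ioctl; the mode values match the switch
 * cases above (definitions copied from the f2fs headers, treat as
 * assumptions if yours differ).
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/types.h>
 *	#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)
 *
 *	__u32 mode = 0x1;	F2FS_GOING_DOWN_METASYNC: checkpoint, then stop
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &mode);
 */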

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
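
/*
 * Illustrative userspace sketch (not part of this file): FITRIM comes from
 * <linux/fs.h> and is the same interface fstrim(8) uses.
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	struct fstrim_range r = { .start = 0, .len = (__u64)-1, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);	r.len returns the number of bytes trimmed
 */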

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
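
/*
 * Illustrative userspace sketch (not part of this file): sync=1 blocks on
 * mutex_lock() above and runs foreground GC; sync=0 returns -EBUSY if GC
 * is already running (ioctl number copied from the f2fs headers).
 *
 *	#define F2FS_IOC_GARBAGE_COLLECT	_IOW(0xf5, 6, __u32)
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */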

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_msg(sbi->sb, KERN_INFO,
			"Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being consumed intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
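
/*
 * Illustrative userspace sketch (not part of this file): F2FS_IOC_MOVE_RANGE
 * moves block-aligned data between two fds on the same f2fs mount (struct
 * layout and ioctl number copied from the f2fs headers; treat them as
 * assumptions if yours differ).
 *
 *	struct f2fs_move_range {
 *		__u32 dst_fd;
 *		__u64 pos_in, pos_out, len;
 *	};
 *	#define F2FS_IOC_MOVE_RANGE	_IOWR(0xf5, 9, struct f2fs_move_range)
 *
 *	struct f2fs_move_range mr = { dst_fd, 0, 0, 1 << 20 };
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */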

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & F2FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & F2FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & F2FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & F2FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & F2FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & F2FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
/* Transfer xflags to internal flags */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= F2FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= F2FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= F2FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= F2FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= F2FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= F2FS_PROJINHERIT_FL;

	return iflags;
}

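/*
 * Note: for any mask built solely from F2FS_SUPPORTED_FS_XFLAGS bits, the
 * two helpers above are mutual inverses, i.e.
 * f2fs_iflags_to_xflags(f2fs_xflags_to_iflags(x)) == x, so no supported
 * xflag is lost on the FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR round trip.
 */
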
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
						F2FS_FL_USER_VISIBLE);

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
{
	/*
	 * Project Quota ID state is only allowed to change from within the
	 * init namespace. Enforce that restriction only if we are trying to
	 * change the quota ID state. Everything else is allowed in user
	 * namespaces.
	 */
	if (current_user_ns() == &init_user_ns)
		return 0;

	if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
		return -EINVAL;

	if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
		if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
			return -EINVAL;
	} else {
		if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
			return -EINVAL;
	}

	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	err = f2fs_ioctl_check_project(inode, &fa);
	if (err)
		goto out;
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}

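/*
 * Usage sketch (illustrative only): F2FS_IOC_FSGETXATTR/FSSETXATTR follow
 * the generic FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR interface from
 * <linux/fs.h>, so a read-modify-write of the project ID from userspace
 * could look like (the project ID 1000 is a made-up example):
 *
 *	struct fsxattr fa;
 *
 *	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) == 0) {
 *		fa.fsx_projid = 1000;
 *		if (ioctl(fd, FS_IOC_FSSETXATTR, &fa) != 0)
 *			perror("FS_IOC_FSSETXATTR");
 *	}
 */
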
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino,
			fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

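/*
 * Usage sketch (illustrative only): pinning keeps GC from migrating a
 * file's blocks; a successful F2FS_IOC_SET_PIN_FILE returns the current
 * GC-failure count, and F2FS_IOC_GET_PIN_FILE reads it back. The ioctl
 * numbers are assumed to come from the kernel's f2fs headers.
 *
 *	__u32 pin = 1, failures = 0;
 *
 *	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) >= 0 &&
 *	    ioctl(fd, F2FS_IOC_GET_PIN_FILE, &failures) == 0)
 *		printf("pinned, %u GC failures so far\n", failures);
 */
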
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

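/*
 * Usage sketch (illustrative only): F2FS_IOC_PRECACHE_EXTENTS takes no
 * argument; it walks the whole file once so that later reads can hit the
 * extent cache instead of doing per-block node lookups.
 *
 *	if (ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS) != 0)
 *		perror("F2FS_IOC_PRECACHE_EXTENTS");
 */
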
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT) &&
				(iocb->ki_flags & IOCB_DIRECT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
					f2fs_has_inline_data(inode) ||
					f2fs_force_buffered_io(inode,
							iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return -EAGAIN;
			}
		} else {
			preallocated = true;
			target_size = iocb->ki_pos + iov_iter_count(from);

			err = f2fs_preallocate_blocks(iocb, from);
			if (err) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				return err;
			}
		}
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};