f2fs: introduce F2FS_IOC_RESERVE_COMPRESS_BLOCKS
author     Chao Yu <yuchao0@huawei.com>
           Fri, 6 Mar 2020 06:35:33 +0000 (14:35 +0800)
committer  Jaegeuk Kim <jaegeuk@kernel.org>
           Tue, 12 May 2020 03:37:12 +0000 (20:37 -0700)
This patch introduces a new ioctl to roll back the compressed inode
status changed by F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
- add reserved blocks in dnode blocks
- increase i_compr_blocks, i_blocks, total_valid_block_count
- remove immutable flag

The compressed inode can then be restored to support overwrite
functionality again.
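
A minimal userspace sketch of how the new ioctl might be used (illustrative
only: the local ioctl definition mirrors the kernel hunk below, and the file
path and error handling are assumptions, not part of this patch):

  /* Sketch: re-reserve blocks for a file whose compressed blocks were
   * previously released via F2FS_IOC_RELEASE_COMPRESS_BLOCKS. */
  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/ioctl.h>
  #include <linux/types.h>

  /* Mirrors the in-kernel definition from fs/f2fs/f2fs.h below. */
  #define F2FS_IOCTL_MAGIC                 0xf5
  #define F2FS_IOC_RESERVE_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 19, __u64)

  int main(int argc, char *argv[])
  {
          __u64 reserved = 0;
          int fd, ret;

          if (argc != 2) {
                  fprintf(stderr, "usage: %s <compressed-file>\n", argv[0]);
                  return 1;
          }

          fd = open(argv[1], O_RDONLY);
          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          /* The number of newly reserved blocks is reported back
           * through the __u64 argument. */
          ret = ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &reserved);
          if (ret < 0)
                  perror("F2FS_IOC_RESERVE_COMPRESS_BLOCKS");
          else
                  printf("reserved %llu blocks\n",
                         (unsigned long long)reserved);

          close(fd);
          return ret < 0 ? 1 : 0;
  }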

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
fs/f2fs/f2fs.h
fs/f2fs/file.c

index 7f3d259e7e3763ccecfe34ea0fac7e9993658e17..1d96f733b1b71bf9d58b5bb1adcb5853aa70320e 100644 (file)
@@ -430,6 +430,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
 #define F2FS_IOC_GET_COMPRESS_BLOCKS   _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
 #define F2FS_IOC_RELEASE_COMPRESS_BLOCKS                               \
                                        _IOR(F2FS_IOCTL_MAGIC, 18, __u64)
+#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS                               \
+                                       _IOR(F2FS_IOCTL_MAGIC, 19, __u64)
 
 #define F2FS_IOC_GET_VOLUME_NAME       FS_IOC_GETFSLABEL
 #define F2FS_IOC_SET_VOLUME_NAME       FS_IOC_SETFSLABEL
index 189f8ce046f07e962151500cf3a8d4c062eff77c..48d90881180772abf26dcb5ea327d451458aa230 100644 (file)
@@ -3598,6 +3598,165 @@ out:
        return ret;
 }
 
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+       unsigned int reserved_blocks = 0;
+       int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+       block_t blkaddr;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               blkaddr = data_blkaddr(dn->inode, dn->node_page,
+                                               dn->ofs_in_node + i);
+
+               if (!__is_valid_data_blkaddr(blkaddr))
+                       continue;
+               if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
+                                       DATA_GENERIC_ENHANCE)))
+                       return -EFSCORRUPTED;
+       }
+
+       while (count) {
+               int compr_blocks = 0;
+               blkcnt_t reserved;
+               int ret;
+
+               for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+                       blkaddr = f2fs_data_blkaddr(dn);
+
+                       if (i == 0) {
+                               if (blkaddr == COMPRESS_ADDR)
+                                       continue;
+                               dn->ofs_in_node += cluster_size;
+                               goto next;
+                       }
+
+                       if (__is_valid_data_blkaddr(blkaddr)) {
+                               compr_blocks++;
+                               continue;
+                       }
+
+                       dn->data_blkaddr = NEW_ADDR;
+                       f2fs_set_data_blkaddr(dn);
+               }
+
+               reserved = cluster_size - compr_blocks;
+               ret = inc_valid_block_count(sbi, dn->inode, &reserved);
+               if (ret)
+                       return ret;
+
+               if (reserved != cluster_size - compr_blocks)
+                       return -ENOSPC;
+
+               f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+
+               reserved_blocks += reserved;
+next:
+               count -= cluster_size;
+       }
+
+       return reserved_blocks;
+}
+
+static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       pgoff_t page_idx = 0, last_idx;
+       unsigned int reserved_blocks = 0;
+       int ret;
+
+       if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+               return -EOPNOTSUPP;
+
+       if (!f2fs_compressed_file(inode))
+               return -EINVAL;
+
+       if (f2fs_readonly(sbi->sb))
+               return -EROFS;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       if (F2FS_I(inode)->i_compr_blocks)
+               goto out;
+
+       f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+       inode_lock(inode);
+
+       if (!IS_IMMUTABLE(inode)) {
+               ret = -EINVAL;
+               goto unlock_inode;
+       }
+
+       down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       down_write(&F2FS_I(inode)->i_mmap_sem);
+
+       last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+       while (page_idx < last_idx) {
+               struct dnode_of_data dn;
+               pgoff_t end_offset, count;
+
+               set_new_dnode(&dn, inode, NULL, NULL, 0);
+               ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+               if (ret) {
+                       if (ret == -ENOENT) {
+                               page_idx = f2fs_get_next_page_offset(&dn,
+                                                               page_idx);
+                               ret = 0;
+                               continue;
+                       }
+                       break;
+               }
+
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+               count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+               count = roundup(count, F2FS_I(inode)->i_cluster_size);
+
+               ret = reserve_compress_blocks(&dn, count);
+
+               f2fs_put_dnode(&dn);
+
+               if (ret < 0)
+                       break;
+
+               page_idx += count;
+               reserved_blocks += ret;
+       }
+
+       up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+       up_write(&F2FS_I(inode)->i_mmap_sem);
+
+       if (ret >= 0) {
+               F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
+               f2fs_set_inode_flags(inode);
+               inode->i_ctime = current_time(inode);
+               f2fs_mark_inode_dirty_sync(inode, true);
+       }
+unlock_inode:
+       inode_unlock(inode);
+out:
+       mnt_drop_write_file(filp);
+
+       if (ret >= 0) {
+               ret = put_user(reserved_blocks, (u64 __user *)arg);
+       } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
+                       "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
+                       "run fsck to fix.",
+                       __func__, inode->i_ino, inode->i_blocks,
+                       reserved_blocks,
+                       F2FS_I(inode)->i_compr_blocks);
+       }
+
+       return ret;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -3682,6 +3841,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                return f2fs_get_compress_blocks(filp, arg);
        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
                return f2fs_release_compress_blocks(filp, arg);
+       case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+               return f2fs_reserve_compress_blocks(filp, arg);
        default:
                return -ENOTTY;
        }
@@ -3850,6 +4011,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case F2FS_IOC_SET_VOLUME_NAME:
        case F2FS_IOC_GET_COMPRESS_BLOCKS:
        case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
+       case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
                break;
        default:
                return -ENOIOCTLCMD;