f2fs: ro: compress: fix to avoid caching unaligned extent
author Chao Yu <chao@kernel.org>
Mon, 26 Feb 2024 07:35:38 +0000 (15:35 +0800)
committer Jaegeuk Kim <jaegeuk@kernel.org>
Mon, 4 Mar 2024 17:51:52 +0000 (09:51 -0800)
Mapping info from dump.f2fs:
i_addr[0x2d] cluster flag     [0xfffffffe : 4294967294]
i_addr[0x2e]                  [0x   10428 : 66600]
i_addr[0x2f]                  [0x   10429 : 66601]
i_addr[0x30]                  [0x   1042a : 66602]

f2fs_io fiemap 37 1 /mnt/f2fs/disk-58390c8c.raw

Previously, it failed to align fofs and ofs_in_node to cluster_size,
resulting in an incorrect read extent cache entry being added; fix it.

Before:
f2fs_update_read_extent_tree_range: dev = (253,48), ino = 5, pgofs = 37, len = 4, blkaddr = 66600, c_len = 3

After:
f2fs_update_read_extent_tree_range: dev = (253,48), ino = 5, pgofs = 36, len = 4, blkaddr = 66600, c_len = 3
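
For illustration, a minimal userspace sketch of the alignment the fix
performs, using the values from the traces above (a cluster_size of 4 is
an assumption matching len = 4; the round_down() macro below mirrors the
kernel helper for power-of-two sizes):

	#include <stdio.h>

	/* mirrors the kernel's round_down() for power-of-two alignment */
	#define round_down(x, y)	((x) & ~((__typeof__(x))(y) - 1))

	int main(void)
	{
		unsigned long fofs = 37;	/* page offset that starts mid-cluster */
		unsigned int cluster_size = 4;	/* assumed i_cluster_size of the inode */

		/* align fofs to the cluster boundary, as the fix does */
		if (fofs % cluster_size)
			fofs = round_down(fofs, cluster_size);

		/* prints 36, matching pgofs in the "After" trace */
		printf("aligned fofs = %lu\n", fofs);
		return 0;
	}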

Fixes: 94afd6d6e525 ("f2fs: extent cache: support unaligned extent")
Signed-off-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
fs/f2fs/compress.c
fs/f2fs/f2fs.h
fs/f2fs/node.c

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 3dc488ce882be6809ad914d11370439cfac939ef..8892c82621414602e50b070d2187d9f66c0e23a5 100644
@@ -1817,16 +1817,18 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
  * check whether cluster blocks are contiguous, and add extent cache entry
  * only if cluster blocks are logically and physically contiguous.
  */
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+                                               unsigned int ofs_in_node)
 {
-       bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+       bool compressed = data_blkaddr(dn->inode, dn->node_page,
+                                       ofs_in_node) == COMPRESS_ADDR;
        int i = compressed ? 1 : 0;
        block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
-                                               dn->ofs_in_node + i);
+                                                       ofs_in_node + i);
 
        for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
                block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
-                                               dn->ofs_in_node + i);
+                                                       ofs_in_node + i);
 
                if (!__is_valid_data_blkaddr(blkaddr))
                        break;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index f1ec1a53afece502fa71c198a798aad2e0e4e935..db05fd02350a5275dcefe9d6fc8225f9cf844329 100644
@@ -4305,7 +4305,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
 void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
                                bool in_task);
 void f2fs_put_page_dic(struct page *page, bool in_task);
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+                                               unsigned int ofs_in_node);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -4362,7 +4363,8 @@ static inline void f2fs_put_page_dic(struct page *page, bool in_task)
 {
        WARN_ON_ONCE(1);
 }
-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(
+                       struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
 static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
 static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 51241996b9ecba1dbe886314ba2847c1e98eeff5..b3de6d6cdb02199c13808bfaabbc8e742b52a067 100644
@@ -852,21 +852,29 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 
        if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
                                        f2fs_sb_has_readonly(sbi)) {
-               unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
+               unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+               unsigned int ofs_in_node = dn->ofs_in_node;
+               pgoff_t fofs = index;
+               unsigned int c_len;
                block_t blkaddr;
 
+               /* should align fofs and ofs_in_node to cluster_size */
+               if (fofs % cluster_size) {
+                       fofs = round_down(fofs, cluster_size);
+                       ofs_in_node = round_down(ofs_in_node, cluster_size);
+               }
+
+               c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
                if (!c_len)
                        goto out;
 
-               blkaddr = f2fs_data_blkaddr(dn);
+               blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
                if (blkaddr == COMPRESS_ADDR)
                        blkaddr = data_blkaddr(dn->inode, dn->node_page,
-                                               dn->ofs_in_node + 1);
+                                               ofs_in_node + 1);
 
                f2fs_update_read_extent_tree_range_compressed(dn->inode,
-                                       index, blkaddr,
-                                       F2FS_I(dn->inode)->i_cluster_size,
-                                       c_len);
+                                       fofs, blkaddr, cluster_size, c_len);
        }
 out:
        return 0;