xfs: add xfs_zero_range and xfs_truncate_page helpers
author Shiyang Ruan <ruansy.fnst@fujitsu.com>
Mon, 29 Nov 2021 10:21:49 +0000 (11:21 +0100)
committer Dan Williams <dan.j.williams@intel.com>
Sat, 4 Dec 2021 16:58:52 +0000 (08:58 -0800)
Add helpers to prepare for using different DAX operations.

Signed-off-by: Shiyang Ruan <ruansy.fnst@fujitsu.com>
[hch: split from a larger patch + slight cleanups]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20211129102203.2243509-16-hch@lst.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_file.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_iops.c
fs/xfs/xfs_reflink.c

index 73a36b7be3bd109894e410be719c4fc774c17de7..797ea0c8b14e1a0e41e05ba30f0c1c2f662a91ba 100644 (file)
@@ -1001,7 +1001,7 @@ xfs_free_file_space(
 
        /*
         * Now that we've unmap all full blocks we'll have to zero out any
-        * partial block at the beginning and/or end.  iomap_zero_range is smart
+        * partial block at the beginning and/or end.  xfs_zero_range is smart
         * enough to skip any holes, including those we just created, but we
         * must take care not to zero beyond EOF and enlarge i_size.
         */
@@ -1009,15 +1009,14 @@ xfs_free_file_space(
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
-       error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
-                       &xfs_buffered_write_iomap_ops);
+       error = xfs_zero_range(ip, offset, len, NULL);
        if (error)
                return error;
 
        /*
         * If we zeroed right up to EOF and EOF straddles a page boundary we
         * must make sure that the post-EOF area is also zeroed because the
-        * page could be mmap'd and iomap_zero_range doesn't do that for us.
+        * page could be mmap'd and xfs_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
        if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
index 27594738b0d181e2a9145c2264c23d09e95a6ac9..8d4c5ca261bd70f41233150f01dd8d010778d268 100644 (file)
@@ -437,8 +437,7 @@ restart:
                }
 
                trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
-               error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
-                               NULL, &xfs_buffered_write_iomap_ops);
+               error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
                if (error)
                        return error;
        } else
index 093758440ad531c58df8c6b377e173a856e0267f..d6d71ae9f2ae48f78c01566323ac1eadcbe2d4db 100644 (file)
@@ -1311,3 +1311,28 @@ out_unlock:
 const struct iomap_ops xfs_xattr_iomap_ops = {
        .iomap_begin            = xfs_xattr_iomap_begin,
 };
+
+int
+xfs_zero_range(
+       struct xfs_inode        *ip,
+       loff_t                  pos,
+       loff_t                  len,
+       bool                    *did_zero)
+{
+       struct inode            *inode = VFS_I(ip);
+
+       return iomap_zero_range(inode, pos, len, did_zero,
+                               &xfs_buffered_write_iomap_ops);
+}
+
+int
+xfs_truncate_page(
+       struct xfs_inode        *ip,
+       loff_t                  pos,
+       bool                    *did_zero)
+{
+       struct inode            *inode = VFS_I(ip);
+
+       return iomap_truncate_page(inode, pos, did_zero,
+                                  &xfs_buffered_write_iomap_ops);
+}
index 7d3703556d0e082271ccfd6b06b915fe11c34425..f1a281ab9328c970cdba369186e1f41b78e183ef 100644 (file)
@@ -20,6 +20,10 @@ xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
 int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
                struct xfs_bmbt_irec *, u16);
 
+int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
+               bool *did_zero);
+int xfs_truncate_page(struct xfs_inode *ip, loff_t pos, bool *did_zero);
+
 static inline xfs_filblks_t
 xfs_aligned_fsb_count(
        xfs_fileoff_t           offset_fsb,
index a607d6aca5c4d38e722e35e6cf97e12675ed5bd0..ab5ef52b2a9ff4215392101efc60519a185f32f5 100644 (file)
@@ -911,8 +911,8 @@ xfs_setattr_size(
         */
        if (newsize > oldsize) {
                trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
-               error = iomap_zero_range(inode, oldsize, newsize - oldsize,
-                               &did_zeroing, &xfs_buffered_write_iomap_ops);
+               error = xfs_zero_range(ip, oldsize, newsize - oldsize,
+                               &did_zeroing);
        } else {
                /*
                 * iomap won't detect a dirty page over an unwritten block (or a
@@ -924,8 +924,7 @@ xfs_setattr_size(
                                                     newsize);
                if (error)
                        return error;
-               error = iomap_truncate_page(inode, newsize, &did_zeroing,
-                               &xfs_buffered_write_iomap_ops);
+               error = xfs_truncate_page(ip, newsize, &did_zeroing);
        }
 
        if (error)
index cb0edb1d68ef16b37672ac77550be0ccc25acffa..facce5c076d83be29d7401fb434eb6415dd4e213 100644 (file)
@@ -1269,8 +1269,7 @@ xfs_reflink_zero_posteof(
                return 0;
 
        trace_xfs_zero_eof(ip, isize, pos - isize);
-       return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
-                       &xfs_buffered_write_iomap_ops);
+       return xfs_zero_range(ip, isize, pos - isize, NULL);
 }
 
 /*