iomap: fall back to buffered writes for invalidation failures
author	Christoph Hellwig <hch@lst.de>	Fri, 24 Jul 2020 05:45:59 +0000 (22:45 -0700)
committer	Darrick J. Wong <darrick.wong@oracle.com>	Wed, 5 Aug 2020 16:24:16 +0000 (09:24 -0700)
Failing to invalidate the page cache means data is incoherent, which is
a very bad state for the system.  Always fall back to buffered I/O
through the page cache if we can't invalidate mappings.
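
The caller-side pattern this enables looks roughly like the sketch below.
The foofs_* names are hypothetical placeholders for a filesystem's
->write_iter() helpers, not real handlers; the zonefs hunk in this patch
follows this exact shape:

	/*
	 * Illustrative only: foofs_* are stand-ins for per-filesystem
	 * write helpers.
	 */
	static ssize_t foofs_file_write_iter(struct kiocb *iocb,
					     struct iov_iter *from)
	{
		if (iocb->ki_flags & IOCB_DIRECT) {
			ssize_t ret = foofs_file_dio_write(iocb, from);

			/*
			 * Only -ENOTBLK from iomap_dio_rw() means "could not
			 * invalidate the page cache, retry buffered"; any
			 * other return value is final.
			 */
			if (ret != -ENOTBLK)
				return ret;
		}

		return foofs_file_buffered_write(iocb, from);
	}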

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Acked-by: Bob Peterson <rpeterso@redhat.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Theodore Ts'o <tytso@mit.edu> # for ext4
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com> # for gfs2
Reviewed-by: Ritesh Harjani <riteshh@linux.ibm.com>
fs/ext4/file.c
fs/gfs2/file.c
fs/iomap/direct-io.c
fs/iomap/trace.h
fs/xfs/xfs_file.c
fs/zonefs/super.c

index 2a01e31a032c4ce393ea5e9514b92e1950d1f5fa..129cc1dd6b79520d9510f614f16a45906633d547 100644
@@ -544,6 +544,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
                iomap_ops = &ext4_iomap_overwrite_ops;
        ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                           is_sync_kiocb(iocb) || unaligned_io || extend);
+       if (ret == -ENOTBLK)
+               ret = 0;
 
        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);
index fe305e4bfd37345048aa6859ed75b74c1699987a..b8929e470b9f603f5887ad318187aab89bcadb02 100644
@@ -814,7 +814,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
 
        ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
                           is_sync_kiocb(iocb));
-
+       if (ret == -ENOTBLK)
+               ret = 0;
 out:
        gfs2_glock_dq(&gh);
 out_uninit:
index 190967e87b69e4628803b3661e2b664aabbc462a..c1aafb2ab990721011947c6536122d811cd93b60 100644
@@ -10,6 +10,7 @@
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
 #include <linux/task_io_accounting_ops.h>
+#include "trace.h"
 
 #include "../internal.h"
 
@@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
  * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
  * may be pure data writes. In that case, we still need to do a full data sync
  * completion.
+ *
+ * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
+ * caller needs to fall back to buffered I/O in this case.
  */
 ssize_t
 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
@@ -478,13 +482,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (iov_iter_rw(iter) == WRITE) {
                /*
                 * Try to invalidate cache pages for the range we are writing.
-                * If this invalidation fails, tough, the write will still work,
-                * but racing two incompatible write paths is a pretty crazy
-                * thing to do, so we don't support it 100%.
+                * If this invalidation fails, let the caller fall back to
+                * buffered I/O.
                 */
                if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
-                               end >> PAGE_SHIFT))
-                       dio_warn_stale_pagecache(iocb->ki_filp);
+                               end >> PAGE_SHIFT)) {
+                       trace_iomap_dio_invalidate_fail(inode, pos, count);
+                       ret = -ENOTBLK;
+                       goto out_free_dio;
+               }
 
                if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
                        ret = sb_init_dio_done_wq(inode->i_sb);
index 5693a39d52fb630edf688ef58426c782632be759..fdc7ae388476f50071e8232bdc4c1bf472a7934b 100644
@@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \
 DEFINE_RANGE_EVENT(iomap_writepage);
 DEFINE_RANGE_EVENT(iomap_releasepage);
 DEFINE_RANGE_EVENT(iomap_invalidatepage);
+DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);
 
 #define IOMAP_TYPE_STRINGS \
        { IOMAP_HOLE,           "HOLE" }, \
index a6ef90457abf974b30f9ad56890baa7504096845..1b4517fc55f1b903abb491e1d6cda4fb46ee82b1 100644
@@ -553,8 +553,8 @@ out:
        xfs_iunlock(ip, iolock);
 
        /*
-        * No fallback to buffered IO on errors for XFS, direct IO will either
-        * complete fully or fail.
+        * No fallback to buffered IO after short writes for XFS, direct I/O
+        * will either complete fully or return an error.
         */
        ASSERT(ret < 0 || ret == count);
        return ret;
index 07bc42d62673cec1fc16f59932db30ecfd285089..d0a04528a7e18ec49d8411c1b11420942dafccc1 100644
@@ -786,8 +786,11 @@ static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
                return -EFBIG;
 
-       if (iocb->ki_flags & IOCB_DIRECT)
-               return zonefs_file_dio_write(iocb, from);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ssize_t ret = zonefs_file_dio_write(iocb, from);
+               if (ret != -ENOTBLK)
+                       return ret;
+       }
 
        return zonefs_file_buffered_write(iocb, from);
 }