netfs: Provide a launder_folio implementation
author David Howells <dhowells@redhat.com>
Thu, 5 Oct 2023 15:52:58 +0000 (16:52 +0100)
committer David Howells <dhowells@redhat.com>
Thu, 28 Dec 2023 09:45:26 +0000 (09:45 +0000)
Provide a launder_folio implementation for netfslib.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org

fs/netfs/buffered_write.c
fs/netfs/main.c
include/linux/netfs.h
include/trace/events/netfs.h

index c078826f7fe671d4a1c1092920e1c7fa832b1a56..50be8fe3ca432a31412acea3b7e9629c1405cc9d 100644 (file)
@@ -1111,3 +1111,77 @@ out:
        return ret;
 }
 EXPORT_SYMBOL(netfs_writepages);
+
+/*
+ * Deal with the disposition of a laundered folio.
+ */
+static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
+{
+       if (wreq->error) {
+               pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
+               mapping_set_error(wreq->mapping, wreq->error);
+       }
+}
+
+/**
+ * netfs_launder_folio - Clean up a dirty folio that's being invalidated
+ * @folio: The folio to clean
+ *
+ * This is called to write back a folio that's being invalidated when an inode
+ * is getting torn down.  Ideally, writepages would be used instead.
+ */
+int netfs_launder_folio(struct folio *folio)
+{
+       struct netfs_io_request *wreq;
+       struct address_space *mapping = folio->mapping;
+       struct netfs_folio *finfo = netfs_folio_info(folio);
+       struct netfs_group *group = netfs_folio_group(folio);
+       struct bio_vec bvec;
+       unsigned long long i_size = i_size_read(mapping->host);
+       unsigned long long start = folio_pos(folio);
+       size_t offset = 0, len;
+       int ret = 0;
+
+       if (finfo) {
+               offset = finfo->dirty_offset;
+               start += offset;
+               len = finfo->dirty_len;
+       } else {
+               len = folio_size(folio);
+       }
+       len = min_t(unsigned long long, len, i_size - start);
+
+       wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
+       if (IS_ERR(wreq)) {
+               ret = PTR_ERR(wreq);
+               goto out;
+       }
+
+       if (!folio_clear_dirty_for_io(folio))
+               goto out_put;
+
+       trace_netfs_folio(folio, netfs_folio_trace_launder);
+
+       _debug("launder %llx-%llx", start, start + len - 1);
+
+       /* Speculatively write to the cache.  We have to fix this up later if
+        * the store fails.
+        */
+       wreq->cleanup = netfs_cleanup_launder_folio;
+
+       bvec_set_folio(&bvec, folio, len, offset);
+       iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
+       __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+       ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
+
+out_put:
+       folio_detach_private(folio);
+       netfs_put_group(group);
+       kfree(finfo);
+       netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+out:
+       folio_wait_fscache(folio);
+       _leave(" = %d", ret);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_launder_folio);
index 8e4db9ff40c40f652281bbc0ff22aba9267e12a5..473f889e1bd180835579b04e6cf0e2c386edd299 100644 (file)
@@ -30,6 +30,7 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
        [NETFS_READPAGE]                = "RP",
        [NETFS_READ_FOR_WRITE]          = "RW",
        [NETFS_WRITEBACK]               = "WB",
+       [NETFS_LAUNDER_WRITE]           = "LW",
        [NETFS_UNBUFFERED_WRITE]        = "UW",
        [NETFS_DIO_READ]                = "DR",
        [NETFS_DIO_WRITE]               = "DW",
index 86bb8cb7f8d0813514bfbb6496f35ef6c8ded6ba..29c66acad9256226ace8aaccb56bf9c079b78ac0 100644 (file)
@@ -227,6 +227,7 @@ enum netfs_io_origin {
        NETFS_READPAGE,                 /* This read is a synchronous read */
        NETFS_READ_FOR_WRITE,           /* This read is to prepare a write */
        NETFS_WRITEBACK,                /* This write was triggered by writepages */
+       NETFS_LAUNDER_WRITE,            /* This is triggered by ->launder_folio() */
        NETFS_UNBUFFERED_WRITE,         /* This is an unbuffered write */
        NETFS_DIO_READ,                 /* This is a direct I/O read */
        NETFS_DIO_WRITE,                /* This is a direct I/O write */
@@ -404,6 +405,7 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
 void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+int netfs_launder_folio(struct folio *folio);
 
 /* VMA operations API. */
 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
index 914a24b03d08ba1f5b054f5a7f26fc7ff5777918..cc998798e20a47b0c37cc94f14162ba29502742f 100644 (file)
@@ -25,6 +25,7 @@
 
 #define netfs_write_traces                                     \
        EM(netfs_write_trace_dio_write,         "DIO-WRITE")    \
+       EM(netfs_write_trace_launder,           "LAUNDER  ")    \
        EM(netfs_write_trace_unbuffered_write,  "UNB-WRITE")    \
        E_(netfs_write_trace_writeback,         "WRITEBACK")
 
@@ -33,6 +34,7 @@
        EM(NETFS_READPAGE,                      "RP")           \
        EM(NETFS_READ_FOR_WRITE,                "RW")           \
        EM(NETFS_WRITEBACK,                     "WB")           \
+       EM(NETFS_LAUNDER_WRITE,                 "LW")           \
        EM(NETFS_UNBUFFERED_WRITE,              "UW")           \
        EM(NETFS_DIO_READ,                      "DR")           \
        E_(NETFS_DIO_WRITE,                     "DW")
        EM(netfs_folio_trace_end_copy,          "end-copy")     \
        EM(netfs_folio_trace_filled_gaps,       "filled-gaps")  \
        EM(netfs_folio_trace_kill,              "kill")         \
+       EM(netfs_folio_trace_launder,           "launder")      \
        EM(netfs_folio_trace_mkwrite,           "mkwrite")      \
        EM(netfs_folio_trace_mkwrite_plus,      "mkwrite+")     \
        EM(netfs_folio_trace_read_gaps,         "read-gaps")    \