NFS: Convert the remaining pagelist helper functions to support folios
author:    Trond Myklebust <trond.myklebust@hammerspace.com>
           Thu, 19 Jan 2023 21:33:39 +0000 (16:33 -0500)
committer: Anna Schumaker <Anna.Schumaker@Netapp.com>
           Tue, 14 Feb 2023 19:22:32 +0000 (14:22 -0500)
Allow creation of subrequests from a request that is carrying a folio.
Add helpers to set up and tear down requests carrying folios.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
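
The diff below leans on nfs_page_to_folio() to tell folio-backed requests apart from page-backed ones. As a reading aid, here is a minimal sketch of that dispatch, keyed off the same PG_FOLIO bit this patch sets and clears; the real helper lives in include/linux/nfs_page.h:

    /* Sketch: a request carries either a folio or a bare page,
     * discriminated by PG_FOLIO in wb_flags. Returns NULL in the
     * page-backed case so callers can fall back to req->wb_page.
     */
    static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
    {
            if (test_bit(PG_FOLIO, &req->wb_flags))
                    return req->wb_folio;
            return NULL;
    }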
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 4bdb570184f78e3a8b877199ce1b404d00aca5bd..dd99a5d381b3588ef08899bbe4c6c3dc66cc3e9e 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -466,10 +466,9 @@ out:
                nfs_release_request(head);
 }
 
-static struct nfs_page *
-__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
-                  unsigned int pgbase, unsigned int offset,
-                  unsigned int count)
+static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
+                                       unsigned int pgbase, pgoff_t index,
+                                       unsigned int offset, unsigned int count)
 {
        struct nfs_page         *req;
        struct nfs_open_context *ctx = l_ctx->open_context;
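
With the struct page argument gone, nfs_page_create() is agnostic about the backing object: callers pass the page-cache index explicitly and attach a folio or a page afterwards through the helpers added in the next hunk.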
@@ -488,19 +487,32 @@ __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
        /* Initialize the request struct. Initially, we assume a
         * long write-back delay. This will be adjusted in
         * update_nfs_request below if the region is not locked. */
-       req->wb_page    = page;
-       if (page) {
-               req->wb_index = page_index(page);
-               get_page(page);
-       }
-       req->wb_offset  = offset;
-       req->wb_pgbase  = pgbase;
-       req->wb_bytes   = count;
+       req->wb_pgbase = pgbase;
+       req->wb_index = index;
+       req->wb_offset = offset;
+       req->wb_bytes = count;
        kref_init(&req->wb_kref);
        req->wb_nio = 0;
        return req;
 }
 
+static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
+{
+       if (folio != NULL) {
+               req->wb_folio = folio;
+               folio_get(folio);
+               set_bit(PG_FOLIO, &req->wb_flags);
+       }
+}
+
+static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
+{
+       if (page != NULL) {
+               req->wb_page = page;
+               get_page(page);
+       }
+}
+
 /**
  * nfs_create_request - Create an NFS read/write request.
  * @ctx: open context to use
@@ -521,9 +533,11 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
 
        if (IS_ERR(l_ctx))
                return ERR_CAST(l_ctx);
-       ret = __nfs_create_request(l_ctx, page, offset, offset, count);
-       if (!IS_ERR(ret))
+       ret = nfs_page_create(l_ctx, offset, page_index(page), offset, count);
+       if (!IS_ERR(ret)) {
+               nfs_page_assign_page(ret, page);
                nfs_page_group_init(ret, NULL);
+       }
        nfs_put_lock_context(l_ctx);
        return ret;
 }
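
The hunk above still shows only the page-based constructor; the folio path composes the same primitives. A sketch of how a folio-based constructor would look (a follow-up patch in this series adds nfs_page_create_from_folio along these lines; take the exact name and signature here as illustrative rather than definitive):

    /* Illustrative: mirror of nfs_create_request() for a request
     * backed by a folio. nfs_page_create() fills in the generic
     * fields; nfs_page_assign_folio() takes the folio reference
     * and sets PG_FOLIO.
     */
    static struct nfs_page *
    nfs_page_create_from_folio(struct nfs_open_context *ctx,
                               struct folio *folio, unsigned int offset,
                               unsigned int count)
    {
            struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
            struct nfs_page *ret;

            if (IS_ERR(l_ctx))
                    return ERR_CAST(l_ctx);
            ret = nfs_page_create(l_ctx, offset, folio_index(folio),
                                  offset, count);
            if (!IS_ERR(ret)) {
                    nfs_page_assign_folio(ret, folio);
                    nfs_page_group_init(ret, NULL);
            }
            nfs_put_lock_context(l_ctx);
            return ret;
    }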
@@ -536,11 +550,16 @@ nfs_create_subreq(struct nfs_page *req,
 {
        struct nfs_page *last;
        struct nfs_page *ret;
+       struct folio *folio = nfs_page_to_folio(req);
        struct page *page = nfs_page_to_page(req, pgbase);
 
-       ret = __nfs_create_request(req->wb_lock_context, page, pgbase, offset,
-                                  count);
+       ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
+                             offset, count);
        if (!IS_ERR(ret)) {
+               if (folio)
+                       nfs_page_assign_folio(ret, folio);
+               else
+                       nfs_page_assign_page(ret, page);
                /* find the last request */
                for (last = req->wb_head;
                     last->wb_this_page != req->wb_head;
@@ -548,7 +567,6 @@ nfs_create_subreq(struct nfs_page *req,
                        ;
 
                nfs_lock_request(ret);
-               ret->wb_index = req->wb_index;
                nfs_page_group_init(ret, last);
                ret->wb_nio = req->wb_nio;
        }
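
Note on the deletion above: the subrequest now inherits req->wb_index through the nfs_page_create() call earlier in this function, so re-assigning ret->wb_index after locking was redundant; its removal is a cleanup, not a behavior change.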
@@ -587,11 +605,16 @@ void nfs_unlock_and_release_request(struct nfs_page *req)
  */
 static void nfs_clear_request(struct nfs_page *req)
 {
+       struct folio *folio = nfs_page_to_folio(req);
        struct page *page = req->wb_page;
        struct nfs_lock_context *l_ctx = req->wb_lock_context;
        struct nfs_open_context *ctx;
 
-       if (page != NULL) {
+       if (folio != NULL) {
+               folio_put(folio);
+               req->wb_folio = NULL;
+               clear_bit(PG_FOLIO, &req->wb_flags);
+       } else if (page != NULL) {
                put_page(page);
                req->wb_page = NULL;
        }
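
Teardown mirrors setup here: nfs_clear_request() releases the reference taken by folio_get()/get_page() in the assign helpers and clears PG_FOLIO, with the folio case tested first via nfs_page_to_folio() so that the PG_FOLIO bit, not the raw pointers, decides which release path runs.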
@@ -1471,16 +1494,21 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
 {
        struct nfs_pgio_mirror *mirror;
        struct nfs_page *prev;
+       struct folio *folio;
        u32 midx;
 
        for (midx = 0; midx < desc->pg_mirror_count; midx++) {
                mirror = nfs_pgio_get_mirror(desc, midx);
                if (!list_empty(&mirror->pg_list)) {
                        prev = nfs_list_entry(mirror->pg_list.prev);
-                       if (index != prev->wb_index + 1) {
-                               nfs_pageio_complete(desc);
-                               break;
-                       }
+                       folio = nfs_page_to_folio(prev);
+                       if (folio) {
+                               if (index == folio_next_index(folio))
+                                       continue;
+                       } else if (index == prev->wb_index + 1)
+                               continue;
+                       nfs_pageio_complete(desc);
+                       break;
                }
        }
 }
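
The rewritten contiguity check is the one place this patch must account for multi-page folios: folio_next_index() yields the page-cache index just past the folio, which equals prev->wb_index + 1 only when the folio is a single page. For reference, a sketch of that helper as it appears in include/linux/pagemap.h:

    /* Sketch of folio_next_index(): the index immediately after
     * @folio. For an order-0 (single-page) folio this reduces to
     * folio->index + 1, matching the old prev->wb_index + 1 test.
     */
    static inline pgoff_t folio_next_index(struct folio *folio)
    {
            return folio->index + folio_nr_pages(folio);
    }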