svcrdma: Add an async version of svc_rdma_send_ctxt_put()
author Chuck Lever <chuck.lever@oracle.com>
Tue, 21 Nov 2023 16:40:33 +0000 (11:40 -0500)
committer Chuck Lever <chuck.lever@oracle.com>
Sun, 7 Jan 2024 22:54:27 +0000 (17:54 -0500)
DMA unmapping can take quite some time, so it should not be handled
in a single-threaded completion handler. Defer releasing send_ctxts
to the recently-added workqueue.

With this patch, DMA unmapping can be handled in parallel, and it
does not cause head-of-queue blocking of Send completions.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
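
The deferral pattern the patch introduces can be sketched on its own: the completion path only queues a work item, and the expensive DMA unmapping runs later in workqueue context, where several items may be processed in parallel. The following is a minimal illustration of that pattern, not the svcrdma code itself; the example_* names and the example_wq workqueue are hypothetical stand-ins.

	#include <linux/workqueue.h>

	struct example_xprt;			/* hypothetical transport */

	struct example_ctxt {
		struct work_struct	work;
		struct example_xprt	*xprt;	/* back-pointer set at allocation time */
	};

	/* Hypothetical release helper: DMA unmapping, page release, return to free list. */
	void example_ctxt_release(struct example_xprt *xprt, struct example_ctxt *ctxt);

	/* Hypothetical module-level workqueue, created with alloc_workqueue(). */
	extern struct workqueue_struct *example_wq;

	static void example_ctxt_put_async(struct work_struct *work)
	{
		struct example_ctxt *ctxt =
			container_of(work, struct example_ctxt, work);

		example_ctxt_release(ctxt->xprt, ctxt);
	}

	/* Called from the single-threaded completion handler: cheap and non-blocking. */
	static void example_ctxt_put(struct example_ctxt *ctxt)
	{
		INIT_WORK(&ctxt->work, example_ctxt_put_async);
		queue_work(example_wq, &ctxt->work);
	}

In the patch itself the same roles are played by sc_work and the new sc_rdma back-pointer: the work handler receives only the work_struct, so container_of() recovers the send_ctxt, and sc_rdma lets the handler reach the owning transport without any extra lookup.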
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_sendto.c

include/linux/sunrpc/svc_rdma.h
index e18c94e816b3be211094f26a45d2fe2fa5f50c78..ab250017b99f35fb9a9fb9b916360e5e84074341 100644 (file)
@@ -152,7 +152,9 @@ struct svc_rdma_recv_ctxt {
 struct svc_rdma_send_ctxt {
        struct llist_node       sc_node;
        struct rpc_rdma_cid     sc_cid;
+       struct work_struct      sc_work;
 
+       struct svcxprt_rdma     *sc_rdma;
        struct ib_send_wr       sc_send_wr;
        struct ib_cqe           sc_cqe;
        struct xdr_buf          sc_hdrbuf;
net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 45735f74eb86c175f89ecd15442a158ad28f2564..22c39ba923d279c28519585bb4c01ac5c38cc7cd 100644 (file)
@@ -143,6 +143,7 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
 
        svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
 
+       ctxt->sc_rdma = rdma;
        ctxt->sc_send_wr.next = NULL;
        ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
        ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
@@ -223,15 +224,8 @@ out_empty:
        goto out;
 }
 
-/**
- * svc_rdma_send_ctxt_put - Return send_ctxt to free list
- * @rdma: controlling svcxprt_rdma
- * @ctxt: object to return to the free list
- *
- * Pages left in sc_pages are DMA unmapped and released.
- */
-void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
-                           struct svc_rdma_send_ctxt *ctxt)
+static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
+                                      struct svc_rdma_send_ctxt *ctxt)
 {
        struct ib_device *device = rdma->sc_cm_id->device;
        unsigned int i;
@@ -255,6 +249,28 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
        llist_add(&ctxt->sc_node, &rdma->sc_send_ctxts);
 }
 
+static void svc_rdma_send_ctxt_put_async(struct work_struct *work)
+{
+       struct svc_rdma_send_ctxt *ctxt;
+
+       ctxt = container_of(work, struct svc_rdma_send_ctxt, sc_work);
+       svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);
+}
+
+/**
+ * svc_rdma_send_ctxt_put - Return send_ctxt to free list
+ * @rdma: controlling svcxprt_rdma
+ * @ctxt: object to return to the free list
+ *
+ * Pages left in sc_pages are DMA unmapped and released.
+ */
+void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
+                           struct svc_rdma_send_ctxt *ctxt)
+{
+       INIT_WORK(&ctxt->sc_work, svc_rdma_send_ctxt_put_async);
+       queue_work(svcrdma_wq, &ctxt->sc_work);
+}
+
 /**
  * svc_rdma_wake_send_waiters - manage Send Queue accounting
  * @rdma: controlling transport