xprtrdma: Simplify locking that protects the rl_allreqs list
Author: Chuck Lever <chuck.lever@oracle.com>
Wed, 19 Dec 2018 15:59:33 +0000 (10:59 -0500)
Committer: Anna Schumaker <Anna.Schumaker@Netapp.com>
Wed, 2 Jan 2019 17:05:18 +0000 (12:05 -0500)
Clean up: There's little chance of contention between the use of
rb_lock and rb_reqslock, so merge the two. This avoids having to
take both in some (possibly future) cases.

Transport tear-down is already serialized, thus there is no need for
locking at all when destroying rpcrdma_reqs.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h

index e2704db2abcb42c7912e76cf42f6a6eb13ff85cd..aae2eb1ea50658b46e4475c98bb4e314869051ed 100644 (file)
 
 #undef RPCRDMA_BACKCHANNEL_DEBUG
 
-static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
-                                struct rpc_rqst *rqst)
-{
-       struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-       struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-
-       spin_lock(&buf->rb_reqslock);
-       list_del(&req->rl_all);
-       spin_unlock(&buf->rb_reqslock);
-
-       rpcrdma_destroy_req(req);
-}
-
 static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
                                 unsigned int count)
 {
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+       struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        unsigned int i;
 
        for (i = 0; i < (count << 1); i++) {
                struct rpcrdma_regbuf *rb;
-               struct rpcrdma_req *req;
                size_t size;
 
                req = rpcrdma_create_req(r_xprt);
@@ -67,7 +54,7 @@ static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
        return 0;
 
 out_fail:
-       rpcrdma_bc_free_rqst(r_xprt, rqst);
+       rpcrdma_req_destroy(req);
        return -ENOMEM;
 }
 
@@ -225,7 +212,6 @@ drop_connection:
  */
 void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
 {
-       struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpc_rqst *rqst, *tmp;
 
        spin_lock(&xprt->bc_pa_lock);
@@ -233,7 +219,7 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock(&xprt->bc_pa_lock);
 
-               rpcrdma_bc_free_rqst(r_xprt, rqst);
+               rpcrdma_req_destroy(rpcr_to_rdmar(rqst));
 
                spin_lock(&xprt->bc_pa_lock);
        }
index 0cce7b23dff4b10ed5de22cda7f8cc7f6e82206c..51e09ae1a81bf7a01dd91dee13e12d674eec1220 100644 (file)
@@ -1043,9 +1043,9 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
        req->rl_buffer = buffer;
        INIT_LIST_HEAD(&req->rl_registered);
 
-       spin_lock(&buffer->rb_reqslock);
+       spin_lock(&buffer->rb_lock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
-       spin_unlock(&buffer->rb_reqslock);
+       spin_unlock(&buffer->rb_lock);
        return req;
 }
 
@@ -1113,7 +1113,6 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 
        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
-       spin_lock_init(&buf->rb_reqslock);
        for (i = 0; i < buf->rb_max_requests; i++) {
                struct rpcrdma_req *req;
 
@@ -1154,9 +1153,18 @@ rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
        kfree(rep);
 }
 
+/**
+ * rpcrdma_req_destroy - Destroy an rpcrdma_req object
+ * @req: unused object to be destroyed
+ *
+ * This function assumes that the caller prevents concurrent device
+ * unload and transport tear-down.
+ */
 void
-rpcrdma_destroy_req(struct rpcrdma_req *req)
+rpcrdma_req_destroy(struct rpcrdma_req *req)
 {
+       list_del(&req->rl_all);
+
        rpcrdma_free_regbuf(req->rl_recvbuf);
        rpcrdma_free_regbuf(req->rl_sendbuf);
        rpcrdma_free_regbuf(req->rl_rdmabuf);
@@ -1214,19 +1222,14 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
                rpcrdma_destroy_rep(rep);
        }
 
-       spin_lock(&buf->rb_reqslock);
-       while (!list_empty(&buf->rb_allreqs)) {
+       while (!list_empty(&buf->rb_send_bufs)) {
                struct rpcrdma_req *req;
 
-               req = list_first_entry(&buf->rb_allreqs,
-                                      struct rpcrdma_req, rl_all);
-               list_del(&req->rl_all);
-
-               spin_unlock(&buf->rb_reqslock);
-               rpcrdma_destroy_req(req);
-               spin_lock(&buf->rb_reqslock);
+               req = list_first_entry(&buf->rb_send_bufs,
+                                      struct rpcrdma_req, rl_list);
+               list_del(&req->rl_list);
+               rpcrdma_req_destroy(req);
        }
-       spin_unlock(&buf->rb_reqslock);
 
        rpcrdma_mrs_destroy(buf);
 }
index ff4eab1c3bf132f3e9f0057aa00eefbc0acdb802..a1cdc85898c7011e5afd71576d482ddbf94809c8 100644 (file)
@@ -392,14 +392,13 @@ struct rpcrdma_buffer {
        spinlock_t              rb_lock;        /* protect buf lists */
        struct list_head        rb_send_bufs;
        struct list_head        rb_recv_bufs;
+       struct list_head        rb_allreqs;
+
        unsigned long           rb_flags;
        u32                     rb_max_requests;
        u32                     rb_credits;     /* most recent credit grant */
 
        u32                     rb_bc_srv_max_requests;
-       spinlock_t              rb_reqslock;    /* protect rb_allreqs */
-       struct list_head        rb_allreqs;
-
        u32                     rb_bc_max_requests;
 
        struct workqueue_struct *rb_completion_wq;
@@ -522,7 +521,7 @@ int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
  * Buffer calls - xprtrdma/verbs.c
  */
 struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
-void rpcrdma_destroy_req(struct rpcrdma_req *);
+void rpcrdma_req_destroy(struct rpcrdma_req *req);
 int rpcrdma_buffer_create(struct rpcrdma_xprt *);
 void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);