diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 143ce2579ba90cca47a87b5413ba35caf9d10792..ae2a83828953706e9b5cde03b8a3449b9aeb3df1 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -225,69 +225,59 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
                }
 }
 
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
-       schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
-void
-rpcrdma_connect_worker(struct work_struct *work)
-{
-       struct rpcrdma_ep *ep =
-               container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
-       struct rpcrdma_xprt *r_xprt =
-               container_of(ep, struct rpcrdma_xprt, rx_ep);
-       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
-       spin_lock_bh(&xprt->transport_lock);
-       if (ep->rep_connected > 0) {
-               if (!xprt_test_and_set_connected(xprt))
-                       xprt_wake_pending_tasks(xprt, 0);
-       } else {
-               if (xprt_test_and_clear_connected(xprt))
-                       xprt_wake_pending_tasks(xprt, -ENOTCONN);
-       }
-       spin_unlock_bh(&xprt->transport_lock);
-}
-
+/**
+ * xprt_rdma_connect_worker - establish connection in the background
+ * @work: worker thread context
+ *
+ * Requester holds the xprt's send lock to prevent activity on this
+ * transport while a fresh connection is being established. RPC tasks
+ * sleep on the xprt's pending queue waiting for connect to complete.
+ */
 static void
 xprt_rdma_connect_worker(struct work_struct *work)
 {
        struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
                                                   rx_connect_worker.work);
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-       int rc = 0;
-
-       xprt_clear_connected(xprt);
+       int rc;
 
        rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
-       if (rc)
-               xprt_wake_pending_tasks(xprt, rc);
-
        xprt_clear_connecting(xprt);
+       if (r_xprt->rx_ep.rep_connected > 0) {
+               if (!xprt_test_and_set_connected(xprt)) {
+                       xprt->stat.connect_count++;
+                       xprt->stat.connect_time += (long)jiffies -
+                                                  xprt->stat.connect_start;
+                       xprt_wake_pending_tasks(xprt, -EAGAIN);
+               }
+       } else {
+               if (xprt_test_and_clear_connected(xprt))
+                       xprt_wake_pending_tasks(xprt, rc);
+       }
 }
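
/*
 * Illustrative sketch, not part of the patch above: a userspace model of
 * the wake status the rewritten connect worker hands to
 * xprt_wake_pending_tasks().  On a successful connect the pending tasks
 * are woken with -EAGAIN so each one re-drives its request on the fresh
 * connection; on failure they are woken with the connect result itself.
 * The wake_status() helper and the values in main() are invented for
 * illustration only.
 */
#include <stdio.h>
#include <errno.h>

static int wake_status(int rep_connected, int connect_rc)
{
	/* mirrors the if/else above: success -> -EAGAIN, failure -> rc */
	return rep_connected > 0 ? -EAGAIN : connect_rc;
}

int main(void)
{
	printf("connect succeeded: wake tasks with %d (-EAGAIN)\n",
	       wake_status(1, 0));
	printf("connect failed:    wake tasks with %d (-ENOTCONN)\n",
	       wake_status(0, -ENOTCONN));
	return 0;
}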
 
+/**
+ * xprt_rdma_inject_disconnect - inject a connection fault
+ * @xprt: transport context
+ *
+ * If @xprt is connected, disconnect it to simulate spurious connection
+ * loss.
+ */
 static void
 xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
 {
-       struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
-                                                  rx_xprt);
+       struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 
        trace_xprtrdma_inject_dsc(r_xprt);
        rdma_disconnect(r_xprt->rx_ia.ri_id);
 }
 
-/*
- * xprt_rdma_destroy
+/**
+ * xprt_rdma_destroy - Full tear down of transport
+ * @xprt: doomed transport context
  *
- * Destroy the xprt.
- * Free all memory associated with the object, including its own.
- * NOTE: none of the *destroy methods free memory for their top-level
- * objects, even though they may have allocated it (they do free
- * private memory). It's up to the caller to handle it. In this
- * case (RDMA transport), all structure memory is inlined with the
- * struct rpcrdma_xprt.
+ * Caller guarantees there will be no more calls to us with
+ * this @xprt.
  */
 static void
 xprt_rdma_destroy(struct rpc_xprt *xprt)
@@ -298,8 +288,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
        cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
 
-       xprt_clear_connected(xprt);
-
        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
        rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);
@@ -442,11 +430,12 @@ out1:
 }
 
 /**
- * xprt_rdma_close - Close down RDMA connection
- * @xprt: generic transport to be closed
+ * xprt_rdma_close - close a transport connection
+ * @xprt: transport context
  *
- * Called during transport shutdown reconnect, or device
- * removal. Caller holds the transport's write lock.
+ * Called during transport shutdown, reconnect, or device removal.
+ * Caller holds @xprt's send lock to prevent activity on this
+ * transport while the connection is torn down.
  */
 static void
 xprt_rdma_close(struct rpc_xprt *xprt)
@@ -468,6 +457,12 @@ xprt_rdma_close(struct rpc_xprt *xprt)
                xprt->reestablish_timeout = 0;
        xprt_disconnect_done(xprt);
        rpcrdma_ep_disconnect(ep, ia);
+
+       /* Prepare @xprt for the next connection by reinitializing
+        * its credit grant to one (see RFC 8166, Section 3.3.3).
+        */
+       r_xprt->rx_buf.rb_credits = 1;
+       xprt->cwnd = RPC_CWNDSHIFT;
 }
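
/*
 * Illustrative sketch, not part of the patch above: a userspace model of
 * the credit-grant reset that xprt_rdma_close() now performs.  Per
 * RFC 8166, Section 3.3.3, a requester must assume a credit grant of one
 * on a fresh connection until a reply advertises the responder's real
 * grant.  The struct, helpers, and the grant value 128 below are invented
 * for illustration; only the "reset to one credit" rule comes from the
 * patch.
 */
#include <stdio.h>

struct model_credits {
	unsigned int granted;	/* credits the responder has advertised */
};

/* what the close path above models: forget the old connection's grant */
static void model_reset(struct model_credits *c)
{
	c->granted = 1;
}

/* what reply processing does later: adopt the advertised grant, but never
 * let it fall to zero or the connection could never make progress */
static void model_update(struct model_credits *c, unsigned int advertised)
{
	c->granted = advertised ? advertised : 1;
}

int main(void)
{
	struct model_credits c;

	model_reset(&c);
	printf("after close/reconnect: %u credit(s)\n", c.granted);
	model_update(&c, 128);
	printf("after first reply:     %u credit(s)\n", c.granted);
	return 0;
}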
 
 /**
@@ -519,6 +514,12 @@ xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
        xprt_force_disconnect(xprt);
 }
 
+/**
+ * xprt_rdma_connect - try to establish a transport connection
+ * @xprt: transport state
+ * @task: RPC scheduler context
+ */
 static void
 xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 {
@@ -638,13 +639,6 @@ rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
  *        0:   Success; rq_buffer points to RPC buffer to use
  *   ENOMEM:   Out of memory, call again later
  *      EIO:   A permanent error occurred, do not retry
- *
- * The RDMA allocate/free functions need the task structure as a place
- * to hide the struct rpcrdma_req, which is necessary for the actual
- * send/recv sequence.
- *
- * xprt_rdma_allocate provides buffers that are already mapped for
- * DMA, and a local DMA lkey is provided for each.
  */
 static int
 xprt_rdma_allocate(struct rpc_task *task)
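
/*
 * Illustrative sketch, not part of the patch above: a caller-side model of
 * the return contract documented for the allocate path (0: buffers ready,
 * ENOMEM: try again later, EIO: permanent failure).  model_alloc() and the
 * retry loop are invented for illustration; in the kernel the RPC
 * scheduler, not a loop like this, performs the delayed retry.
 */
#include <stdio.h>
#include <errno.h>

static int model_alloc(int attempt)
{
	return attempt < 2 ? -ENOMEM : 0;	/* pretend memory frees up later */
}

int main(void)
{
	int attempt, rc;

	for (attempt = 0; attempt < 5; attempt++) {
		rc = model_alloc(attempt);
		if (rc == 0) {
			printf("attempt %d: buffers ready\n", attempt);
			break;
		}
		if (rc == -ENOMEM) {
			printf("attempt %d: no memory, will retry\n", attempt);
			continue;
		}
		printf("attempt %d: permanent error %d, giving up\n",
		       attempt, rc);
		break;
	}
	return 0;
}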
@@ -693,7 +687,7 @@ xprt_rdma_free(struct rpc_task *task)
 
 /**
  * xprt_rdma_send_request - marshal and send an RPC request
- * @task: RPC task with an RPC message in rq_snd_buf
+ * @rqst: RPC request with an RPC message in rq_snd_buf
  *
  * Caller holds the transport's write lock.
  *
@@ -706,9 +700,8 @@ xprt_rdma_free(struct rpc_task *task)
  *             sent. Do not try to send this message again.
  */
 static int
-xprt_rdma_send_request(struct rpc_task *task)
+xprt_rdma_send_request(struct rpc_rqst *rqst)
 {
-       struct rpc_rqst *rqst = task->tk_rqstp;
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
@@ -722,6 +715,9 @@ xprt_rdma_send_request(struct rpc_task *task)
        if (!xprt_connected(xprt))
                goto drop_connection;
 
+       if (!xprt_request_get_cong(xprt, rqst))
+               return -EBADSLT;
+
        rc = rpcrdma_marshal_req(r_xprt, rqst);
        if (rc < 0)
                goto failed_marshal;
@@ -741,7 +737,7 @@ xprt_rdma_send_request(struct rpc_task *task)
        /* An RPC with no reply will throw off credit accounting,
         * so drop the connection to reset the credit grant.
         */
-       if (!rpc_reply_expected(task))
+       if (!rpc_reply_expected(rqst->rq_task))
                goto drop_connection;
        return 0;
 
@@ -766,7 +762,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
                   0,   /* need a local port? */
                   xprt->stat.bind_count,
                   xprt->stat.connect_count,
-                  xprt->stat.connect_time,
+                  xprt->stat.connect_time / HZ,
                   idle_time,
                   xprt->stat.sends,
                   xprt->stat.recvs,
@@ -786,7 +782,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
                   r_xprt->rx_stats.bad_reply_count,
                   r_xprt->rx_stats.nomsg_call_count);
        seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
-                  r_xprt->rx_stats.mrs_recovered,
+                  r_xprt->rx_stats.mrs_recycled,
                   r_xprt->rx_stats.mrs_orphaned,
                   r_xprt->rx_stats.mrs_allocated,
                   r_xprt->rx_stats.local_inv_needed,
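
/*
 * Illustrative sketch, not part of the patch above: the arithmetic behind
 * the connect_time change.  The connect worker accumulates elapsed jiffies
 * (jiffies - connect_start) into connect_time, and xprt_rdma_print_stats()
 * now divides by HZ so the value is reported in seconds.  The tick rate
 * and jiffies values below are assumed purely for illustration.
 */
#include <stdio.h>

#define MODEL_HZ 250UL	/* assumed tick rate; real kernels vary */

int main(void)
{
	unsigned long connect_start = 1000;	/* jiffies when connect began */
	unsigned long now = 1625;		/* jiffies when it completed */
	unsigned long connect_time = 0;		/* accumulated, in jiffies */

	connect_time += now - connect_start;	/* 625 jiffies */
	printf("connect_time = %lu jiffies -> reported as %lu s\n",
	       connect_time, connect_time / MODEL_HZ);	/* 625 / 250 = 2 s */
	return 0;
}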