spinlock_t sc_rw_ctxt_lock;
struct list_head sc_rw_ctxts;
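+ /* sc_pending_recvs counts Receives posted but not yet
+  * completed; sc_recv_batch is the number of Receives
+  * posted per refresh.
+  */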
+ u32 sc_pending_recvs;
+ u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
ctxt->rc_temp = temp;
ctxt->rc_recv_wr.next = recv_chain;
recv_chain = &ctxt->rc_recv_wr;
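+ /* Count this Receive as pending until its completion fires */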
+ rdma->sc_pending_recvs++;
}
if (!recv_chain)
return false;
bad_wr = bad_wr->next;
svc_rdma_recv_ctxt_put(rdma, ctxt);
}
+ /* Since we're destroying the xprt, no need to reset
+  * sc_pending_recvs.
+  */
return false;
}
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_recv_ctxt *ctxt;
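+ /* This Receive has completed and is no longer pending */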
+ rdma->sc_pending_recvs--;
+
/* WARNING: Only wc->wr_cqe and wc->status are reliable */
ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
* to reduce the likelihood of replayed requests once the
* client reconnects.
*/
- if (!svc_rdma_refresh_recvs(rdma, 1, false))
- goto flushed;
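+ /* Replenish Receives in batches, presumably to amortize the
+  * cost of ib_post_recv(), but only while the pending count is
+  * below the sc_max_requests watermark.
+  */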
+ if (rdma->sc_pending_recvs < rdma->sc_max_requests)
+ if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
+ goto flushed;
/* All wc fields are now known to be valid */
ctxt->rc_byte_len = wc->byte_len;
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
- rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;
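+ /* Assumption: the batch size is added to rq_depth so that a
+  * full batch of Receives fits in the Receive Queue alongside
+  * the outstanding requests.
+  */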
+ newxprt->sc_recv_batch = RPCRDMA_MAX_RECV_BATCH;
+ rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +
+ newxprt->sc_recv_batch;
if (rq_depth > dev->attrs.max_qp_wr) {
pr_warn("svcrdma: reducing receive depth to %d\n",
dev->attrs.max_qp_wr);
rq_depth = dev->attrs.max_qp_wr;
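+ /* The device cannot support the requested depth; fall back
+  * to posting one Receive at a time.
+  */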
+ newxprt->sc_recv_batch = 1;
newxprt->sc_max_requests = rq_depth - 2;
newxprt->sc_max_bc_requests = 2;
}