/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */
#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG
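
/* Unlink the rpcrdma_req from the transport's list of allocated
 * requests, release its registered buffers, and free the rpc_rqst
 * that owns it.
 */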
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(&r_xprt->rx_ia, req);

	kfree(rqst);
}
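
/* Allocate an rpcrdma_req for @rqst, along with an RPC/RDMA header
 * buffer and a send buffer sized to the connection's inline receive
 * threshold, then attach the req to the rqst as transport-private
 * data.
 */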
static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;

	rb = rpcrdma_alloc_regbuf(ia, RPCRDMA_HDRBUF_SIZE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

	size = r_xprt->rx_data.inline_rsize;
	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_sendbuf = rb;

	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base, size);
	rpcrdma_set_xprtdata(rqst, req);
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of rep's. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpcrdma_rep *rep;
	int rc = 0;

	while (count--) {
		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			pr_err("RPC:       %s: reply buffer alloc failed\n",
			       __func__);
			rc = PTR_ERR(rep);
			break;
		}

		rpcrdma_recv_buffer_put(rep);
	}

	return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst) {
			pr_err("RPC:       %s: Failed to create bc rpc_rqst\n",
			       __func__);
			goto out_free;
		}
		dprintk("RPC:       %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
	request_module("svcrdma");

	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC:       %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}
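
/* Illustrative caller sketch (not part of this file): xprt_rdma_bc_setup
 * is not called directly by ULPs. It is wired up as the transport's
 * ->bc_setup method and invoked by the generic sunrpc backchannel code
 * when a ULP such as NFSv4.1 requests backchannel slots, roughly:
 *
 *	rc = xprt->ops->bc_setup(xprt, min_reqs);
 *
 * The exact call site lives in net/sunrpc/backchannel_rqst.c and may
 * differ in detail.
 */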
/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}
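
/* The "rdma-bc" transport class is registered by the svcrdma module
 * (loaded by request_module() in xprt_rdma_bc_setup). Port zero is
 * used because a backchannel endpoint never listens for connections;
 * backward direction calls arrive on the existing forward channel.
 */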
/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}
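
/* Worked example (values are illustrative, not mandated): with inline
 * thresholds of 1024 bytes in each direction and a minimal RPC/RDMA
 * header of 28 bytes (xid, vers, credits, type, plus three empty
 * chunk lists), the backchannel payload limit would be
 * 1024 - 28 = 996 bytes.
 */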
/**
 * rpcrdma_bc_marshal_reply - Send backwards direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_msg *headerp;
	size_t rpclen;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit =
		cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	headerp->rm_type = rdma_msg;
	/* No chunk lists: backward direction replies are always inline */
	headerp->rm_body.rm_chunks[0] = xdr_zero;
	headerp->rm_body.rm_chunks[1] = xdr_zero;
	headerp->rm_body.rm_chunks[2] = xdr_zero;

	rpclen = rqst->rq_svec[0].iov_len;

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: rpclen %zd headerp 0x%p lkey 0x%x\n",
		__func__, rpclen, headerp, rdmab_lkey(req->rl_rdmabuf));
	pr_info("RPC:       %s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, headerp);
	pr_info("RPC:       %s:      RPC: %*ph\n",
		__func__, (int)rpclen, rqst->rq_svec[0].iov_base);
#endif

	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = RPCRDMA_HDRLEN_MIN;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
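
/* Resulting Send payload, as marshaled above:
 *
 *	iov[0]: RPC/RDMA header (RPCRDMA_HDRLEN_MIN bytes)
 *		xid | vers | credits | RDMA_MSG | 0 | 0 | 0
 *	iov[1]: RPC reply message (rpclen bytes, always inline)
 */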
/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);

		/* Drop the lock while destroying this rqst:
		 * rpcrdma_bc_free_rqst takes other locks.
		 */
		spin_unlock_bh(&xprt->bc_pa_lock);
		rpcrdma_bc_free_rqst(r_xprt, rqst);
		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));

	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Called in the RPC reply handler, which runs in a tasklet.
 * Be quick about it.
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_msg *headerp;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC:       %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
	pr_info("RPC:       %s: %*ph\n", __func__, rep->rr_len, headerp);
#endif

	/* Sanity check:
	 * Need at least enough bytes for RPC/RDMA header, as code
	 * here references the header fields by array offset. Also,
	 * backward calls are always inline, so ensure there
	 * are some bytes beyond the RPC/RDMA header: 24 bytes covers
	 * the six fixed words of an RPC call header (xid, direction,
	 * rpcvers, prog, vers, proc).
	 */
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
		goto out_short;
	p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
	size = rep->rr_len - RPCRDMA_HDRLEN_MIN;

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	dprintk("RPC:       %s: using rqst %p\n", __func__, rqst);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = headerp->rm_xid;

	rqst->rq_private_buf.len = size;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it can be reposted after the server is done
	 * parsing it but just before sending the backward
	 * direction reply.
	 */
	req = rpcr_to_rdmar(rqst);
	dprintk("RPC:       %s: attaching rep %p to req %p\n",
		__func__, rep, req);
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");

	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		xprt_disconnect_done(xprt);
	else
		pr_warn("RPC:       %s: reposting rep %p\n",
			__func__, rep);
}
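
/* Backchannel lifecycle summary, as implemented in this file:
 *
 *	xprt_rdma_bc_setup()       pre-allocates rqsts and receive buffers
 *	rpcrdma_bc_receive_call()  maps an incoming RDMA_MSG to a free rqst
 *				   and queues it for the ULP's callback service
 *	rpcrdma_bc_marshal_reply() builds the inline reply for that rqst
 *	xprt_rdma_bc_free_rqst()   returns the rqst to bc_pa_list for reuse
 *	xprt_rdma_bc_destroy()     tears everything down with the transport
 */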