net/sunrpc/xprtrdma/backchannel.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2020, Oracle and/or its affiliates.
 *
 * Support for reverse-direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
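
	/* The requested count is ignored: the transport always advertises
	 * a fixed credit limit derived from its pool of backward-direction
	 * Work Requests. Half the pool is advertised, presumably leaving
	 * room for both the Receive of each Call and the Send of its Reply.
	 */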
	r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
	trace_xprtrdma_cb_setup(r_xprt, reqs);
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	size_t maxmsg;
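
	/* Backchannel messages carry no chunks, so the whole message must
	 * fit inline in both directions. The backchannel send buffer set
	 * up in rpcrdma_bc_rqst_get() is at most one page, hence the
	 * PAGE_SIZE cap; the fixed transport header is then subtracted
	 * to yield the usable payload size.
	 */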
	maxmsg = min_t(unsigned int, ep->re_inline_send, ep->re_inline_recv);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *xprt)
{
	return RPCRDMA_BACKWARD_WRS >> 1;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			rdmab_data(req->rl_rdmabuf), rqst);
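
	/* Build the 28-byte (RPCRDMA_HDRLEN_MIN) transport header by hand:
	 * XID, version, credits granted, RDMA_MSG, and three xdr_zero
	 * words marking the Read, Write, and Reply chunk lists as empty.
	 */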
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
				      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
		return -EIO;

	trace_xprtrdma_cb_reply(r_xprt, rqst);
	return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *	%0 if the RPC message has been sent
 *	%-ENOTCONN if the caller should reconnect and call again
 *	%-EIO if a permanent error occurred and the request was not
 *		sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	int rc;

	if (!xprt_connected(xprt))
		return -ENOTCONN;
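
	/* A congestion window slot must be available before the Reply
	 * can be transmitted.
	 */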
	if (!xprt_request_get_cong(xprt, rqst))
		return -EBADSLT;

	rc = rpcrdma_bc_marshal_reply(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (frwr_send(r_xprt, req))
		goto drop_connection;
	return 0;

failed_marshal:
	if (rc != -ENOTCONN)
		return rc;
drop_connection:
	xprt_rdma_close(xprt);
	return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpc_rqst *rqst, *tmp;
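
	/* bc_pa_lock is dropped around each rpcrdma_req_destroy() call,
	 * presumably because tearing down a req (and releasing its MRs)
	 * can sleep.
	 */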
	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock(&xprt->bc_pa_lock);

		rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

		spin_lock(&xprt->bc_pa_lock);
	}
	spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_rep *rep = req->rl_reply;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	rpcrdma_rep_put(&r_xprt->rx_buf, rep);
	req->rl_reply = NULL;

	spin_lock(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
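	/* Drop the transport reference taken in rpcrdma_bc_receive_call(). */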
	xprt_put(xprt);
}
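
/* Return an rqst from the transport's backchannel pre-allocation list,
 * or create a fresh one on demand, up to a total of RPCRDMA_BACKWARD_WRS
 * rqsts for this transport.
 */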
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	size_t size;

	spin_lock(&xprt->bc_pa_lock);
	rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
					rq_bc_pa_list);
	if (!rqst)
		goto create_req;
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	return rqst;

create_req:
	spin_unlock(&xprt->bc_pa_lock);

	/* Set a limit to prevent a remote from overrunning our resources.
	 */
	if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
		return NULL;

	size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
	req = rpcrdma_req_create(r_xprt, size);
	if (!req)
		return NULL;
	if (rpcrdma_req_setup(r_xprt, req)) {
		rpcrdma_req_destroy(req);
		return NULL;
	}

	xprt->bc_alloc_count++;
	rqst = &req->rl_slot;
	rqst->rq_xprt = xprt;
	__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
	return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a reverse-direction Call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;
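
	/* The transport header has already been decoded by the caller, so
	 * rr_stream now points at the start of the RPC Call message; the
	 * zero-length xdr_inline_decode() simply returns that position.
	 * The Call's XID is the first word at @p.
	 */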
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%u\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif

	rqst = rpcrdma_bc_rqst_get(r_xprt);
	if (!rqst)
		goto out_overflow;

	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;

	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	trace_xprtrdma_cb_call(r_xprt, rqst);

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	xprt_get(xprt);
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_force_disconnect(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}