// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
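
        /* Each backchannel request is assumed to consume both a
         * Send and a Receive WR from the pool reserved for backward
         * direction work, so only half of RPCRDMA_BACKWARD_WRS can
         * be advertised as the credit limit for concurrent
         * backchannel requests.
         */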
        r_xprt->rx_buf.rb_bc_srv_max_requests = RPCRDMA_BACKWARD_WRS >> 1;
        trace_xprtrdma_cb_setup(r_xprt, reqs);
        return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        size_t maxmsg;

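        /* A backchannel message must fit in the inline buffers of
         * both directions, and never exceeds one page; the fixed
         * RPC/RDMA transport header then consumes part of that
         * space.
         */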
        maxmsg = min_t(unsigned int, ep->rep_inline_send, ep->rep_inline_recv);
        maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
        return maxmsg - RPCRDMA_HDRLEN_MIN;
}

static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        __be32 *p;

        rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
        xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
                        rdmab_data(req->rl_rdmabuf), rqst);

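        /* The reply's transport header is 7 XDR words (28 octets):
         * the XID, the RPC/RDMA version, the advertised credit
         * value, the RDMA_MSG procedure, and three xdr_zero words
         * marking the Read list, Write list, and Reply chunk as
         * empty: an inline-only message needs no chunks.
         */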
        p = xdr_reserve_space(&req->rl_stream, 28);
        if (unlikely(!p))
                return -EIO;
        *p++ = rqst->rq_xid;
        *p++ = rpcrdma_version;
        *p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
        *p++ = rdma_msg;
        *p++ = xdr_zero;
        *p++ = xdr_zero;
        *p = xdr_zero;

        if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
                                      &rqst->rq_snd_buf, rpcrdma_noch))
                return -EIO;

        trace_xprtrdma_cb_reply(rqst);
        return 0;
}

/**
 * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
 * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
 *
 * Caller holds the transport's write lock.
 *
 * Returns:
 *      %0 if the RPC message has been sent
 *      %-ENOTCONN if the caller should reconnect and call again
 *      %-EIO if a permanent error occurred and the request was not
 *              sent. Do not try to send this message again.
 */
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
{
        struct rpc_xprt *xprt = rqst->rq_xprt;
        struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        int rc;

        if (!xprt_connected(xprt))
                return -ENOTCONN;

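        /* Sending a backchannel reply consumes a congestion control
         * credit, just as a forward direction send does; when no
         * credit is available, -EBADSLT presumably signals the
         * caller to back off and try again later.
         */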
        if (!xprt_request_get_cong(xprt, rqst))
                return -EBADSLT;

        rc = rpcrdma_bc_marshal_reply(rqst);
        if (rc < 0)
                goto failed_marshal;

        if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
                goto drop_connection;
        return 0;

failed_marshal:
        if (rc != -ENOTCONN)
                return rc;
drop_connection:
        xprt_rdma_close(xprt);
        return -ENOTCONN;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
        struct rpc_rqst *rqst, *tmp;

        spin_lock(&xprt->bc_pa_lock);
        list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                list_del(&rqst->rq_bc_pa_list);
                spin_unlock(&xprt->bc_pa_lock);

                rpcrdma_req_destroy(rpcr_to_rdmar(rqst));

                spin_lock(&xprt->bc_pa_lock);
        }
        spin_unlock(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
        struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
        struct rpc_xprt *xprt = rqst->rq_xprt;

        rpcrdma_recv_buffer_put(req->rl_reply);
        req->rl_reply = NULL;

        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
}

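/* Grab a free backchannel rqst off the transport's bc_pa_list, or
 * allocate a fresh one on demand, up to a hard cap of
 * RPCRDMA_BACKWARD_WRS rqsts per transport.
 */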
static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        size_t size;

        spin_lock(&xprt->bc_pa_lock);
        rqst = list_first_entry_or_null(&xprt->bc_pa_list, struct rpc_rqst,
                                        rq_bc_pa_list);
        if (!rqst)
                goto create_req;
        list_del(&rqst->rq_bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
        return rqst;

create_req:
        spin_unlock(&xprt->bc_pa_lock);

        /* Set a limit to prevent a remote from overrunning our resources.
         */
        if (xprt->bc_alloc_count >= RPCRDMA_BACKWARD_WRS)
                return NULL;

        size = min_t(size_t, r_xprt->rx_ep.rep_inline_recv, PAGE_SIZE);
        req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
        if (!req)
                return NULL;

        xprt->bc_alloc_count++;
        rqst = &req->rl_slot;
        rqst->rq_xprt = xprt;
        __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
        xdr_buf_init(&rqst->rq_snd_buf, rdmab_data(req->rl_sendbuf), size);
        return rqst;
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (eg, NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
                             struct rpcrdma_rep *rep)
{
        struct rpc_xprt *xprt = &r_xprt->rx_xprt;
        struct svc_serv *bc_serv;
        struct rpcrdma_req *req;
        struct rpc_rqst *rqst;
        struct xdr_buf *buf;
        size_t size;
        __be32 *p;

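        /* A zero-length inline decode returns the current position
         * in the receive stream without consuming anything, so p
         * points at the first word of the RPC call header (its XID)
         * and size is the length of the whole call message.
         */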
        p = xdr_inline_decode(&rep->rr_stream, 0);
        size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
        pr_info("RPC:       %s: callback XID %08x, length=%zu\n",
                __func__, be32_to_cpup(p), size);
        pr_info("RPC:       %s: %*ph\n", __func__, (int)size, p);
#endif

        rqst = rpcrdma_bc_rqst_get(r_xprt);
        if (!rqst)
                goto out_overflow;

        rqst->rq_reply_bytes_recvd = 0;
        rqst->rq_xid = *p;

        rqst->rq_private_buf.len = size;

        buf = &rqst->rq_rcv_buf;
        memset(buf, 0, sizeof(*buf));
        buf->head[0].iov_base = p;
        buf->head[0].iov_len = size;
        buf->len = size;

        /* The receive buffer has to be hooked to the rpcrdma_req
         * so that it is not released while the req is pointing
         * to its buffer, and so that it can be reposted after
         * the Upper Layer is done decoding it.
         */
        req = rpcr_to_rdmar(rqst);
        req->rl_reply = rep;
        trace_xprtrdma_cb_call(rqst);

        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

        wake_up(&bc_serv->sv_cb_waitq);

        r_xprt->rx_stats.bcall_count++;
        return;

out_overflow:
        pr_warn("RPC/RDMA backchannel overflow\n");
        xprt_force_disconnect(xprt);
        /* This receive buffer gets reposted automatically
         * when the connection is re-established.
         */
        return;
}