/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

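	/* The receive workqueue handles incoming RPC Replies.
	 * WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure, since NFS writeback may be waiting on these
	 * completions; WQ_HIGHPRI keeps reply processing latency low.
	 */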
	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_receive(rep, wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(&xprt->rx_xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_pd = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			rdma_reject_msg(id, event->status));
		connstate = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			connstate = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
connected:
		xprt->rx_buf.rb_credits = 1;
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %s:%s on %s/%s (ep 0x%p): %s\n",
			__func__,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			ia->ri_device->name, ia->ri_ops->ro_displayname,
			ep, rdma_event_msg(event->event));
		break;
	}

	return 0;
}

static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

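	/* Each wait below is bounded in case the CM never delivers an
	 * upcall: it runs one jiffy longer than RDMA_RESOLVE_TIMEOUT,
	 * so the CM's own timeout normally fires first. ri_async_rc is
	 * preset to -ETIMEDOUT before each step and is overwritten by
	 * the upcall with the real result.
	 */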
	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC:       %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC:       %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	unsigned int max_qp_wr, max_sge;
	struct ib_cq *sendcq, *recvcq;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC:       %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	pr_info("%s: r_xprt = %p\n", __func__, r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	dprintk("RPC:       %s: reconnecting...\n", __func__);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC:       %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	unsigned int extras;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);
	extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
	if (extras)
		rpcrdma_ep_post_extra_recv(r_xprt, extras);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
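
/* Example: a queue of four slots (rb_sc_last == 3) that starts with
 * rb_sc_head == rb_sc_tail == 0 can hold three in-flight sendctxs;
 * a fourth _get finds next_head == tail and returns NULL. One slot
 * is always left unused so that a full queue can be distinguished
 * from an empty one.
 */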

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
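
/* For instance, with rb_sc_last == 3, next(0) == 1 and next(3) == 0:
 * a compare-and-branch wraps the index without a division per call.
 */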

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	dprintk("RPC:       %s: empty sendctx queue\n", __func__);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);
}

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC:       %s: recovering MR %p\n", __func__, mr);
		mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

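/* Allocate a modest batch of MRs. This is called at transport set-up,
 * and again from the refresh worker whenever rpcrdma_mr_get() finds
 * the pool empty, so the MR pool grows on demand instead of being
 * sized for the worst case up front.
 */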
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 32; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);

	dprintk("RPC:       %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	return req;
}

/**
 * rpcrdma_create_rep - Allocate an rpcrdma_rep object
 * @r_xprt: controlling transport
 *
 * Returns 0 on success or a negative errno on failure.
 */
int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC:       %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}

int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i <= buf->rb_max_requests; i++) {
		rc = rpcrdma_create_rep(r_xprt);
		if (rc)
			goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_list);
	list_del_init(&req->rl_list);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);
		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	dprintk("RPC:       %s: no MRs available\n", __func__);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If an RPC previously completed without a reply (say, a
	 * credential problem or a soft timeout occurs) then hold off
	 * on supplying more Receive buffers until the number of new
	 * pending RPCs catches up to the number of posted Receives.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC:       %s: out of request buffers\n", __func__);
	return NULL;
}

/*
 * Put request/reply buffers back into pool.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}
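
/* Note that a regbuf is not DMA-mapped when it is allocated. Mapping
 * is deferred until the buffer is first posted (see
 * rpcrdma_dma_map_regbuf), so allocation does not require a live
 * device, and a regbuf can be re-mapped after a device removal event.
 */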

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = ia->ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
		return false;

	rb->rg_device = device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	struct ib_send_wr *send_wr_fail;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

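	/* Request a Send completion only for every rep_send_batch-th
	 * WR, or when this RPC is waiting on send resources
	 * (RPCRDMA_REQ_F_TX_RESOURCES). The eventual signaled
	 * completion releases this sendctx and all unsignaled ones
	 * queued before it (see rpcrdma_sendctx_put_locked).
	 */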
	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	trace_xprtrdma_post_recv(rep, rc);
	if (rc)
		return -ENOTCONN;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}