/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

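/* Set up the workqueue on which Receive completions are processed.
 * rpcrdma_destroy_wq() below releases it again.
 */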
int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

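/* Handle an asynchronous QP event reported by the RDMA provider.
 * A fatal event on a connected endpoint marks the connection failed
 * and wakes up anyone waiting on the connection state.
 */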
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}

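/* Decode the RPC-over-RDMA private message sent by the peer during
 * connection establishment, if any, and clamp this transport's inline
 * thresholds to the sizes the peer advertised.
 */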
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC: %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

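/* Connection Manager event handler for this transport's rdma_cm_id.
 * Address and route resolution results are reported through
 * ia->ri_async_rc; connection state changes are recorded in
 * ep->rep_connected before waiters are awoken.
 */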
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(&xprt->rx_xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_pd = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			rdma_reject_msg(id, event->status));
		connstate = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			connstate = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
connected:
		xprt->rx_buf.rb_credits = 1;
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC: %s: %s:%s on %s/%s (ep 0x%p): %s\n",
			__func__,
			rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
			ia->ri_device->name, ia->ri_ops->ro_displayname,
			ep, rdma_event_msg(event->event));
		break;
	}

	return 0;
}

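/* Create an rdma_cm_id for this transport, then synchronously resolve
 * the server's address and a route to it. Each step is bounded by
 * RDMA_RESOLVE_TIMEOUT.
 */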
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC: %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC: %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC: %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	unsigned int max_qp_wr, max_sge;
	struct ib_cq *sendcq, *recvcq;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC: %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC: %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC: %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC: %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	pr_info("%s: r_xprt = %p\n", __func__, r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	dprintk("RPC: %s: reconnecting...\n", __func__);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC: %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	unsigned int extras;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC: %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC: %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC: %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC: %s: connected\n", __func__);
	extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
	if (extras)
		rpcrdma_ep_post_extra_recv(r_xprt, extras);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC: %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

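/* Illustrative example only: with rb_sc_last == 3 the queue holds
 * entries 0..3, and rpcrdma_sendctx_next() advances 0 -> 1 -> 2 -> 3 -> 0.
 * The queue is treated as empty when advancing the head would land
 * on the tail.
 */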
/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}

static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	dprintk("RPC: %s: empty sendctx queue\n", __func__);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);
}

static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mr *mr;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC: %s: recovering MR %p\n", __func__, mr);
		mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

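/* Allocate a batch of MRs, initialize them via the registration ops,
 * and splice them onto the transport's free and "all" MR lists under
 * rb_mrlock.
 */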
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 32; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);

	dprintk("RPC: %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}

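/* Allocate an rpcrdma_req and add it to the buffer's rb_allreqs list.
 * Returns a valid pointer, or an ERR_PTR if allocation fails.
 */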
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	return req;
}

/**
 * rpcrdma_create_rep - Allocate an rpcrdma_rep object
 * @r_xprt: controlling transport
 *
 * Returns 0 on success or a negative errno on failure.
 */
int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC: %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}

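/* Set up a transport's buffer state: MR lists and their workers, the
 * send and receive buffer pools, and the sendctx queue. On failure,
 * everything allocated so far is released via rpcrdma_buffer_destroy().
 */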
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC: %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i <= buf->rb_max_requests; i++) {
		rc = rpcrdma_create_rep(r_xprt);
		if (rc)
			goto out;
	}

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

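/* The _locked helpers below assume the caller has exclusive access to
 * the buffer lists, normally by holding rb_lock.
 */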
static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_list);
	list_del_init(&req->rl_list);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);
		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC: %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_mrs_destroy(buf);
}

/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	dprintk("RPC: %s: no MRs available\n", __func__);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If an RPC previously completed without a reply (say, a
	 * credential problem or a soft timeout occurs) then hold off
	 * on supplying more Receive buffers until the number of new
	 * pending RPCs catches up to the number of posted Receives.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC: %s: out of request buffers\n", __func__);
	return NULL;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = ia->ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
		return false;

	rb->rg_device = device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	struct ib_send_wr *send_wr_fail;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

	dprintk("RPC: %s: posting %d s/g entries\n",
		__func__, send_wr->num_sge);

	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}
	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
	if (rc)
		goto out_postsend_err;
	return 0;

out_postsend_err:
	pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
	return -ENOTCONN;
}

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	if (rc)
		goto out_postrecv;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;

out_postrecv:
	pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
	return -ENOTCONN;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}