/*
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

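/*
 * A single tasklet drains data transfer operation (DTO) work for every
 * svcrdma transport; dto_lock protects dto_xprt_q, the list of
 * transports with completions pending.
 */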
DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

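/*
 * Grow the per-transport cache of svc_rdma_op_ctxt structures by
 * sc_ctxt_bump entries, up to sc_ctxt_max.  The counter is bumped and
 * the lock dropped around each kmalloc() so that a slow allocation
 * never stalls other users of the cache; on failure the reservation
 * is given back.  Returns non-zero if at least one context was added.
 */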
static int rdma_bump_context_cache(struct svcxprt_rdma *xprt)
{
        int target;
        int at_least_one = 0;
        struct svc_rdma_op_ctxt *ctxt;

        target = min(xprt->sc_ctxt_cnt + xprt->sc_ctxt_bump,
                     xprt->sc_ctxt_max);

        spin_lock_bh(&xprt->sc_ctxt_lock);
        while (xprt->sc_ctxt_cnt < target) {
                xprt->sc_ctxt_cnt++;
                spin_unlock_bh(&xprt->sc_ctxt_lock);

                ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);

                spin_lock_bh(&xprt->sc_ctxt_lock);
                if (ctxt) {
                        at_least_one = 1;
                        INIT_LIST_HEAD(&ctxt->free_list);
                        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
                } else {
                        /* kmalloc failed...give up for now */
                        xprt->sc_ctxt_cnt--;
                        break;
                }
        }
        spin_unlock_bh(&xprt->sc_ctxt_lock);
        dprintk("svcrdma: sc_ctxt_max=%d, sc_ctxt_cnt=%d\n",
                xprt->sc_ctxt_max, xprt->sc_ctxt_cnt);
        return at_least_one;
}

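/*
 * Take a context off the free list, growing the cache if it is empty.
 * This can block: if no memory is available, the caller sleeps in
 * 500ms increments and retries until a context can be allocated.
 */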
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt;

        while (1) {
                spin_lock_bh(&xprt->sc_ctxt_lock);
                if (unlikely(list_empty(&xprt->sc_ctxt_free))) {
                        /* Try to bump my cache. */
                        spin_unlock_bh(&xprt->sc_ctxt_lock);

                        if (rdma_bump_context_cache(xprt))
                                continue;

                        printk(KERN_INFO "svcrdma: sleeping waiting for "
                               "context memory on xprt=%p\n",
                               xprt);
                        schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                        continue;
                }
                ctxt = list_entry(xprt->sc_ctxt_free.next,
                                  struct svc_rdma_op_ctxt,
                                  free_list);
                list_del_init(&ctxt->free_list);
                spin_unlock_bh(&xprt->sc_ctxt_lock);
                ctxt->xprt = xprt;
                INIT_LIST_HEAD(&ctxt->dto_q);
                ctxt->count = 0;
                atomic_inc(&xprt->sc_ctxt_used);
                break;
        }
        return ctxt;
}

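/*
 * Return a context to the free list.  Pages attached to the context
 * are released if free_pages is set, and any DMA mappings made when
 * the context was posted are torn down.
 */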
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt;
        int i;

        BUG_ON(!ctxt);
        xprt = ctxt->xprt;
        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        /* Unmap with the ib_dma_* helper to match the ib_dma_map_page()
         * call made when the context was posted */
        for (i = 0; i < ctxt->count; i++)
                ib_dma_unmap_single(xprt->sc_cm_id->device,
                                    ctxt->sge[i].addr,
                                    ctxt->sge[i].length,
                                    ctxt->direction);

        spin_lock_bh(&xprt->sc_ctxt_lock);
        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
        spin_unlock_bh(&xprt->sc_ctxt_lock);
        atomic_dec(&xprt->sc_ctxt_used);
}

/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;
        dprintk("svcrdma: received CQ event id=%d, context=%p\n",
                event->event, context);
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %d received for QP=%p\n",
                        event->event, event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %d received for QP=%p, "
                        "closing transport\n",
                        event->event, event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list.  Two bits indicate
 * if SQ, RQ, or both have I/O pending.  The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
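/*
 * Each transport holds a reference (taken in rq_comp_handler and
 * sq_comp_handler) while it sits on dto_xprt_q; the reference is
 * dropped here once its completion queues have been reaped.
 */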
static void dto_tasklet_func(unsigned long data)
{
        struct svcxprt_rdma *xprt;
        unsigned long flags;

        spin_lock_irqsave(&dto_lock, flags);
        while (!list_empty(&dto_xprt_q)) {
                xprt = list_entry(dto_xprt_q.next,
                                  struct svcxprt_rdma, sc_dto_q);
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);

                rq_cq_reap(xprt);
                sq_cq_reap(xprt);

                svc_xprt_put(&xprt->sc_xprt);

                spin_lock_irqsave(&dto_lock, flags);
        }
        spin_unlock_irqrestore(&dto_lock, flags);
}

/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called in interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
         */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQ and enqueue the associated DTO
 * context on the dto_q for the transport.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
        int ret;
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;

        if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);

        while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
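                /*
                 * The 64-bit wr_id round-trips the svc_rdma_op_ctxt
                 * pointer that was stored when the WR was posted, e.g.
                 *
                 *      recv_wr.wr_id = (u64)(unsigned long)ctxt;
                 *
                 * in svc_rdma_post_recv() below.
                 */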
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                ctxt->wc_status = wc.status;
                ctxt->byte_len = wc.byte_len;
                if (wc.status != IB_WC_SUCCESS) {
                        /* Close the transport */
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_put_context(ctxt, 1);
                        continue;
                }
                spin_lock_bh(&xprt->sc_rq_dto_lock);
                list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
                spin_unlock_bh(&xprt->sc_rq_dto_lock);
        }

        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        /*
         * If data arrived before established event,
         * don't enqueue. This defers RPC I/O until the
         * RDMA connection is complete.
         */
        if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                svc_xprt_enqueue(&xprt->sc_xprt);
}

/*
 * Send Queue Completion Handler - potentially called in interrupt context.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct ib_wc wc;
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;

        if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
                return;

        ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
                xprt = ctxt->xprt;

                if (wc.status != IB_WC_SUCCESS)
                        /* Close the transport */
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);

                /* Decrement used SQ WR count */
                atomic_dec(&xprt->sc_sq_count);
                wake_up(&xprt->sc_send_wait);

                switch (ctxt->wr_op) {
                case IB_WR_SEND:
                case IB_WR_RDMA_WRITE:
                        svc_rdma_put_context(ctxt, 1);
                        break;

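                /*
                 * An RPC with a read chunk is not processed until its
                 * last RDMA_READ completes; the header context saved
                 * in read_hdr is then queued on sc_read_complete_q and
                 * a server thread is kicked to finish the request.
                 */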
                case IB_WR_RDMA_READ:
                        if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                                struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
                                BUG_ON(!read_hdr);
                                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                                spin_lock_bh(&xprt->sc_read_complete_lock);
                                list_add_tail(&read_hdr->dto_q,
                                              &xprt->sc_read_complete_q);
                                spin_unlock_bh(&xprt->sc_read_complete_lock);
                                svc_xprt_enqueue(&xprt->sc_xprt);
                        }
                        svc_rdma_put_context(ctxt, 0);
                        break;

                default:
                        printk(KERN_ERR "svcrdma: unexpected completion type, "
                               "opcode=%d, status=%d\n",
                               wc.opcode, wc.status);
                        break;
                }
        }

        if (ctxt)
                atomic_inc(&rdma_stat_sq_prod);
}

static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;

        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
         */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

        /*
         * If this transport is not already on the DTO transport queue,
         * add it
         */
        spin_lock_irqsave(&dto_lock, flags);
        if (list_empty(&xprt->sc_dto_q)) {
                svc_xprt_get(&xprt->sc_xprt);
                list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
        }
        spin_unlock_irqrestore(&dto_lock, flags);

        /* Tasklet does all the work to avoid irqsave locks. */
        tasklet_schedule(&dto_tasklet);
}

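/*
 * Pre-populate the context cache.  Individual allocation failures are
 * simply skipped here; the caller treats a completely empty cache as
 * fatal, and rdma_bump_context_cache() grows the cache on demand.
 */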
static void create_context_cache(struct svcxprt_rdma *xprt,
                                 int ctxt_count, int ctxt_bump, int ctxt_max)
{
        struct svc_rdma_op_ctxt *ctxt;
        int i;

        xprt->sc_ctxt_max = ctxt_max;
        xprt->sc_ctxt_bump = ctxt_bump;
        xprt->sc_ctxt_cnt = 0;
        atomic_set(&xprt->sc_ctxt_used, 0);

        INIT_LIST_HEAD(&xprt->sc_ctxt_free);
        for (i = 0; i < ctxt_count; i++) {
                ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
                if (ctxt) {
                        INIT_LIST_HEAD(&ctxt->free_list);
                        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
                        xprt->sc_ctxt_cnt++;
                }
        }
}

static void destroy_context_cache(struct svcxprt_rdma *xprt)
{
        while (!list_empty(&xprt->sc_ctxt_free)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_entry(xprt->sc_ctxt_free.next,
                                  struct svc_rdma_op_ctxt,
                                  free_list);
                list_del_init(&ctxt->free_list);
                kfree(ctxt);
        }
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;
        svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_read_complete_lock);
        spin_lock_init(&cma_xprt->sc_ctxt_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);

        cma_xprt->sc_ord = svcrdma_ord;

        cma_xprt->sc_max_req_size = svcrdma_max_req_size;
        cma_xprt->sc_max_requests = svcrdma_max_requests;
        cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
        atomic_set(&cma_xprt->sc_sq_count, 0);

        if (!listener) {
                int reqs = cma_xprt->sc_max_requests;
                create_context_cache(cma_xprt,
                                     reqs << 1, /* starting size */
                                     reqs,      /* bump amount */
                                     reqs +
                                     cma_xprt->sc_sq_depth +
                                     RPCRDMA_MAX_THREADS + 1); /* max */
                if (list_empty(&cma_xprt->sc_ctxt_free)) {
                        kfree(cma_xprt);
                        return NULL;
                }
                clear_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);
        } else
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}

struct page *svc_rdma_get_page(void)
{
        struct page *page;

        while ((page = alloc_page(GFP_KERNEL)) == NULL) {
                /* If we can't get memory, wait a bit and try again */
                printk(KERN_INFO "svcrdma: out of memory...retrying "
                       "in 1000 ms.\n");
                schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
        }
        return page;
}

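/*
 * Post a receive WR with enough page-sized SGEs to hold the largest
 * inline request the client may send (sc_max_req_size).  The pages
 * stay attached to the context, which is recovered from the wr_id
 * cookie when the receive completes.
 */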
int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        unsigned long pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                BUG_ON(sge_no >= xprt->sc_max_sge);
                page = svc_rdma_get_page();
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
                buflen += PAGE_SIZE;
        }
        ctxt->count = sge_no;
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_id = (u64)(unsigned long)ctxt;

        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret)
                svc_rdma_put_context(ctxt, 1);
        return ret;
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        /*
         * Can't use svc_xprt_received here because we are not on a
         * rqstp thread
         */
        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, cma_id->context, event->event);
                handle_connect_req(cma_id);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }

        return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event=%d\n", cma_id, xprt, event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event=%d\n", cma_id, event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");

        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        kfree(cma_xprt);
        return ERR_PTR(ret);
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
        struct sockaddr *sa;
        int ret;
        int i;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
        if (ret) {
                dprintk("svcrdma: could not query device attributes on "
                        "device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
                goto errout;
        }

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)devattr.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
                                       (size_t)svcrdma_max_requests);
        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

        newxprt->sc_ord = min((size_t)devattr.max_qp_rd_atom,
                              (size_t)svcrdma_ord);

        newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         sq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_sq_depth,
                                         0);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
                                         rq_comp_handler,
                                         cq_event_handler,
                                         newxprt,
                                         newxprt->sc_max_requests,
                                         0);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
                "    cm_id->device=%p, sc_pd->device=%p\n"
                "    cap.max_send_wr = %d\n"
                "    cap.max_recv_wr = %d\n"
                "    cap.max_send_sge = %d\n"
                "    cap.max_recv_sge = %d\n",
                newxprt->sc_cm_id, newxprt->sc_pd,
                newxprt->sc_cm_id->device, newxprt->sc_pd->device,
                qp_attr.cap.max_send_wr,
                qp_attr.cap.max_recv_wr,
                qp_attr.cap.max_send_sge,
                qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                /*
                 * XXX: This is a hack. We need a xx_request_qp interface
                 * that will adjust the qp_attr's with a best-effort
                 * number
                 */
                qp_attr.cap.max_send_sge -= 2;
                qp_attr.cap.max_recv_sge -= 2;
                ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd,
                                     &qp_attr);
                if (ret) {
                        dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                        goto errout;
                }
                /* Record what the provider actually granted; take the
                 * smaller of the send and recv SGE limits */
                newxprt->sc_max_sge = min(qp_attr.cap.max_send_sge,
                                          qp_attr.cap.max_recv_sge);
                newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
                newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
        }
        svc_xprt_get(&newxprt->sc_xprt);
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        /* Register all of physical memory */
        newxprt->sc_phys_mr = ib_get_dma_mr(newxprt->sc_pd,
                                            IB_ACCESS_LOCAL_WRITE |
                                            IB_ACCESS_REMOTE_WRITE);
        if (IS_ERR(newxprt->sc_phys_mr)) {
                dprintk("svcrdma: Failed to create DMA MR ret=%d\n", ret);
                goto errout;
        }

        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
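        /*
         * The client is not expected to issue RDMA READs against the
         * server, so no responder resources are requested; the
         * initiator depth bounds the RDMA READs the server itself
         * issues to pull chunks from the client.
         */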
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }

        dprintk("svcrdma: new connection %p accepted with the following "
                "attributes:\n"
                "    local_ip        : %d.%d.%d.%d\n"
                "    local_port      : %d\n"
                "    remote_ip       : %d.%d.%d.%d\n"
                "    remote_port     : %d\n"
                "    max_sge         : %d\n"
                "    sq_depth        : %d\n"
                "    max_requests    : %d\n"
                "    ord             : %d\n",
                newxprt,
                NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
                         route.addr.src_addr)->sin_addr.s_addr),
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.src_addr)->sin_port),
                NIPQUAD(((struct sockaddr_in *)&newxprt->sc_cm_id->
                         route.addr.dst_addr)->sin_addr.s_addr),
                ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
                       route.addr.dst_addr)->sin_port),
                newxprt->sc_max_sge,
                newxprt->sc_sq_depth,
                newxprt->sc_max_requests,
                newxprt->sc_ord);

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        return &newxprt->sc_xprt;

 errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
        if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) {
                ib_destroy_qp(newxprt->sc_qp);
                svc_xprt_put(&newxprt->sc_xprt);
        }
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
        return NULL;
}

static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least three references:
 *
 * - A reference held by the QP. We still hold that here because this
 *   code deletes the QP and puts the reference.
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum two references should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) {
                ib_destroy_qp(rdma->sc_qp);
                svc_xprt_put(xprt);
        }

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);
}

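/*
 * The final kref_put on an svc_xprt can happen in softirq context
 * (for example, from the DTO tasklet), where the verbs destroy calls
 * below must not run because they may sleep.  Freeing is therefore
 * punted to a workqueue so it always happens in process context.
 */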
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

        /* We should only be called from kref_put */
        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_destroy_cq(rdma->sc_rq_cq);

        if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
                ib_dereg_mr(rdma->sc_phys_mr);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        destroy_context_cache(rdma);
        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        schedule_work(&rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are fewer SQ WR available than required to send a
         * simple response, return false.
         */
        if ((rdma->sc_sq_depth - atomic_read(&rdma->sc_sq_count) < 3))
                return 0;

        /*
         * ...or there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}

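/*
 * Post a WR on the send queue.  sc_lock serializes the check of
 * sc_sq_count against sc_sq_depth with the post itself, so the SQ can
 * never be over-committed.  When the SQ is full the caller first reaps
 * completions opportunistically, then sleeps on sc_send_wait until
 * sq_cq_reap() frees an entry.
 */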
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
        struct ib_send_wr *bad_wr;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                return -ENOTCONN;

        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
        BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
               wr->opcode);
        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                spin_lock_bh(&xprt->sc_lock);
                if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);

                        /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);

                        /* Wait until SQ WR available if SQ still full */
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_count) <
                                   xprt->sc_sq_depth);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return 0;
                        continue;
                }
                /* Bump used SQ WR count and post */
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (!ret)
                        atomic_inc(&xprt->sc_sq_count);
                else
                        dprintk("svcrdma: failed to post SQ WR rc=%d, "
                                "sc_sq_count=%d, sc_sq_depth=%d\n",
                                ret, atomic_read(&xprt->sc_sq_count),
                                xprt->sc_sq_depth);
                spin_unlock_bh(&xprt->sc_lock);
                break;
        }
        return ret;
}

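/*
 * Build and post an RPC/RDMA error reply (for example, a version
 * mismatch) directly on the connection, bypassing the normal sendto
 * path.
 */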
int svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                        enum rpcrdma_errcode err)
{
        struct ib_send_wr err_wr;
        struct ib_sge sge;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        u32 *va;
        int length;
        int ret;

        p = svc_rdma_get_page();
        va = page_address(p);

        /* XDR encode error */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

        /* Prepare SGE for local address; the device reads the reply,
         * so the mapping direction is DMA_TO_DEVICE */
        sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
                                   p, 0, PAGE_SIZE, DMA_TO_DEVICE);
        sge.lkey = xprt->sc_phys_mr->lkey;
        sge.length = length;

        ctxt = svc_rdma_get_context(xprt);
        ctxt->count = 1;
        ctxt->pages[0] = p;

        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof err_wr);
        ctxt->wr_op = IB_WR_SEND;
        err_wr.wr_id = (unsigned long)ctxt;
        err_wr.sg_list = &sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;

        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
                dprintk("svcrdma: Error posting send = %d\n", ret);
                svc_rdma_put_context(ctxt, 1);
        }

        return ret;
}