sunrpc: move address copy/cmp/convert routines and prototypes from clnt.h to addr.h
net/sunrpc/xprtrdma/transport.c
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
		int xprt_rdma_pad_optimize = 0;

#ifdef RPC_DEBUG

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;

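/*
 * With RPC_DEBUG enabled, the tunables above are published under
 * /proc/sys/sunrpc/ (for example /proc/sys/sunrpc/rdma_slot_table_entries
 * or /proc/sys/sunrpc/rdma_memreg_strategy); the table below is registered
 * in xprt_rdma_init() and torn down in xprt_rdma_cleanup().
 */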
static ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

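/*
 * Build the presentation strings (address, port, protocol, hex forms
 * and netid) that the generic RPC client reports for this transport.
 */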
static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[64];

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	/* netid */
	xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
}

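/*
 * Free the kstrdup()'d address strings; the protocol and netid entries
 * are static strings and are skipped.
 */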
static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

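/*
 * Worker for the delayed connect work: perform the RDMA connect (or
 * reconnect), wake pending RPC tasks on failure, and clear the
 * connecting state when done.
 */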
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt =
		container_of(work, struct rpcrdma_xprt, rdma_connect.work);
	struct rpc_xprt *xprt = &r_xprt->xprt;
	int rc = 0;

	current->flags |= PF_FSTRANS;
	xprt_clear_connected(xprt);

	dprintk("RPC: %s: %sconnect\n", __func__,
		r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC: %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
	current->flags &= ~PF_FSTRANS;
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc;

	dprintk("RPC: %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rdma_connect);

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rc = rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		dprintk("RPC: %s: rpcrdma_ep_destroy returned %i\n",
			__func__, rc);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC: %s: returning\n", __func__);

	module_put(THIS_MODULE);
}

static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = (60U * HZ);
	xprt->reestablish_timeout = (5U * HZ);
	xprt->idle_timeout = (5U * 60 * HZ);

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC: %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */

	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(&new_xprt->rx_buf, new_ep, &new_xprt->rx_ia,
				&new_xprt->rx_data);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rdma_connect, xprt_rdma_connect_worker);
	new_ep->rep_func = rpcrdma_conn_func;
	new_ep->rep_xprt = xprt;

	xprt_rdma_format_addresses(xprt);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	(void) rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC: %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	(void) rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

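/*
 * Record the port chosen by rpcbind in both the generic xprt address
 * and the RDMA connect address.
 */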
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC: %s: %u\n", __func__, port);
}

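/*
 * Kick off a connect by scheduling the connect worker. A reconnect is
 * delayed by reestablish_timeout, which backs off exponentially between
 * 5 and 30 seconds; an initial connect is scheduled immediately, and a
 * synchronous task waits for the worker to finish.
 */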
static void
xprt_rdma_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rdma_connect,
			xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > (30 * HZ))
			xprt->reestablish_timeout = (30 * HZ);
		else if (xprt->reestablish_timeout < (5 * HZ))
			xprt->reestablish_timeout = (5 * HZ);
	} else {
		schedule_delayed_work(&r_xprt->rdma_connect, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rdma_connect);
	}
}

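/*
 * Congestion-controlled reserve: scale the congestion window by the
 * credit count granted by the server, then defer to the generic
 * xprt_reserve_xprt_cong().
 */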
static int
xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int credits = atomic_read(&r_xprt->rx_buf.rb_credits);

	/* == RPC_CWNDSCALE @ init, but *after* setup */
	if (r_xprt->rx_buf.rb_cwndscale == 0UL) {
		r_xprt->rx_buf.rb_cwndscale = xprt->cwnd;
		dprintk("RPC: %s: cwndscale %lu\n", __func__,
			r_xprt->rx_buf.rb_cwndscale);
		BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
	}
	xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
	return xprt_reserve_xprt_cong(xprt, task);
}

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence. For this reason, the recv buffers are attached to send
 * buffers for portions of the RPC. Note that the RPC layer allocates
 * both send and receive buffers in the same call. We may register
 * the receive buffer portion when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req, *nreq;

	req = rpcrdma_buffer_get(&rpcx_to_rdmax(xprt)->rx_buf);
	BUG_ON(NULL == req);

	if (size > req->rl_size) {
		dprintk("RPC: %s: size %zd too large for buffer[%zd]: "
			"prog %d vers %d proc %d\n",
			__func__, size, req->rl_size,
			task->tk_client->cl_prog, task->tk_client->cl_vers,
			task->tk_msg.rpc_proc->p_proc);
		/*
		 * Outgoing length shortage. Our inline write max must have
		 * been configured to perform direct i/o.
		 *
		 * This is therefore a large metadata operation, and the
		 * allocate call was made on the maximum possible message,
		 * e.g. containing long filename(s) or symlink data. In
		 * fact, while these metadata operations *might* carry
		 * large outgoing payloads, they rarely *do*. However, we
		 * have to commit to the request here, so reallocate and
		 * register it now. The data path will never require this
		 * reallocation.
		 *
		 * If the allocation or registration fails, the RPC framework
		 * will (doggedly) retry.
		 */
		if (rpcx_to_rdmax(xprt)->rx_ia.ri_memreg_strategy ==
				RPCRDMA_BOUNCEBUFFERS) {
			/* forced to "pure inline" */
			dprintk("RPC: %s: too much data (%zd) for inline "
				"(r/w max %d/%d)\n", __func__, size,
				rpcx_to_rdmad(xprt).inline_rsize,
				rpcx_to_rdmad(xprt).inline_wsize);
			size = req->rl_size;
			rpc_exit(task, -EIO);	/* fail the operation */
			rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
			goto out;
		}
		if (task->tk_flags & RPC_TASK_SWAPPER)
			nreq = kmalloc(sizeof *req + size, GFP_ATOMIC);
		else
			nreq = kmalloc(sizeof *req + size, GFP_NOFS);
		if (nreq == NULL)
			goto outfail;

		if (rpcrdma_register_internal(&rpcx_to_rdmax(xprt)->rx_ia,
				nreq->rl_base, size + sizeof(struct rpcrdma_req)
				- offsetof(struct rpcrdma_req, rl_base),
				&nreq->rl_handle, &nreq->rl_iov)) {
			kfree(nreq);
			goto outfail;
		}
		rpcx_to_rdmax(xprt)->rx_stats.hardway_register_count += size;
		nreq->rl_size = size;
		nreq->rl_niovs = 0;
		nreq->rl_nchunks = 0;
		nreq->rl_buffer = (struct rpcrdma_buffer *)req;
		nreq->rl_reply = req->rl_reply;
		memcpy(nreq->rl_segments,
			req->rl_segments, sizeof nreq->rl_segments);
		/* flag the swap with an unused field */
		nreq->rl_iov.length = 0;
		req->rl_reply = NULL;
		req = nreq;
	}
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
out:
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_xdr_buf;

outfail:
	rpcrdma_buffer_put(req);
	rpcx_to_rdmax(xprt)->rx_stats.failed_marshal_count++;
	return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_rep *rep;
	int i;

	if (buffer == NULL)
		return;

	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
	if (req->rl_iov.length == 0) {	/* see allocate above */
		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
				      struct rpcrdma_xprt, rx_buf);
	} else
		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
	rep = req->rl_reply;

	dprintk("RPC: %s: called on 0x%p%s\n",
		__func__, rep, (rep && rep->rr_func) ? " (with waiter)" : "");

	/*
	 * Finish the deregistration. When using mw bind, this was
	 * begun in rpcrdma_reply_handler(). In all other modes, we
	 * do it here, in thread context. The process is considered
	 * complete when the rr_func vector becomes NULL - this
	 * was put in place during rpcrdma_reply_handler() - the wait
	 * call below will not block if the dereg is "done". If
	 * interrupted, our framework will clean up.
	 */
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += rpcrdma_deregister_external(
			&req->rl_segments[i], r_xprt, NULL);
	}

	if (rep && wait_event_interruptible(rep->rr_unbind, !rep->rr_func)) {
		rep->rr_func = NULL;	/* abandon the callback */
		req->rl_reply = NULL;
	}

	if (req->rl_iov.length == 0) {	/* see allocate above */
		struct rpcrdma_req *oreq = (struct rpcrdma_req *)req->rl_buffer;
		oreq->rl_reply = req->rl_reply;
		(void) rpcrdma_deregister_internal(&r_xprt->rx_ia,
						   req->rl_handle,
						   &req->rl_iov);
		kfree(req);
		req = oreq;
	}

	/* Put back request+reply buffers */
	rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up asynch completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */

static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	/* marshal the send itself */
	if (req->rl_niovs == 0 && rpcrdma_marshal_req(rqst) != 0) {
		r_xprt->rx_stats.failed_marshal_count++;
		dprintk("RPC: %s: rpcrdma_marshal_req failed\n",
			__func__);
		return -EIO;
	}

	if (req->rl_reply == NULL)	/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	if (req->rl_reply) {
		req->rl_reply->rr_func = rpcrdma_reply_handler;
		/* this need only be done once, but... */
		req->rl_reply->rr_xprt = xprt;
	}

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}

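/*
 * Emit one line of transport statistics: the generic xprt counters
 * followed by the RPC/RDMA-specific counters kept in rx_stats.
 */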
static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_rdma_reserve_xprt,
	.release_xprt		= xprt_release_xprt_cong,	/* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,	/* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};

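/*
 * Module exit: unregister the sysctl table (when RPC_DEBUG is enabled)
 * and unregister the "rdma" transport class.
 */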
static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk(KERN_INFO "RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#ifdef RPC_DEBUG
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC: %s: xprt_unregister returned %i\n",
			__func__, rc);
}

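/*
 * Module init: register the "rdma" transport class, report the default
 * tunable values, and (with RPC_DEBUG) register the sysctl table.
 */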
static int __init xprt_rdma_init(void)
{
	int rc;

	rc = xprt_register_transport(&xprt_rdma);

	if (rc)
		return rc;

	dprintk(KERN_INFO "RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk(KERN_INFO "Defaults:\n");
	dprintk(KERN_INFO "\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk(KERN_INFO "\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#ifdef RPC_DEBUG
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}

module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);