/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * sizeof(struct rpcrdma_read_chunk);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}
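/* Worked example, for illustration only (the byte counts assume the
 * conventional XDR encoding: a 28-byte RPCRDMA_HDRLEN_MIN, a 24-byte
 * struct rpcrdma_read_chunk, and a 16-byte struct rpcrdma_segment;
 * verify against the actual struct definitions before relying on it):
 *
 *	rpcrdma_max_call_header_size(8) = 28 + (8 + 2) * 24
 *					  + 4 + 16 + 4 = 292 bytes
 */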
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message. The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * sizeof(struct rpcrdma_segment);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				 rpcrdma_max_reply_header_size(maxsegs);
}
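/* For illustration only (4096 is a common inline threshold, not a
 * guarantee): with inline_wsize = 4096 and maxsegs = 8, the worked
 * example above gives
 *
 *	ri_max_inline_write = 4096 - 292 = 3804 bytes
 *
 * so any RPC Call whose XDR encoding exceeds 3804 bytes must move
 * its payload into a Read chunk.
 */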
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = 0;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
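/* A worked example of the loop above (values chosen for illustration):
 * a 12288-byte page list starting 512 bytes into its first page covers
 * partial first and last pages and so consumes four SGEs:
 * 3584 + 4096 + 4096 + 512 bytes.
 */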
/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}
/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
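/* A minimal sketch of the splitter's behavior (illustrative values):
 * a 6000-byte kvec whose iov_base sits 1000 bytes into a page yields
 * two SGEs, 3096 bytes then 2904 bytes, because each SGE must stop at
 * a page boundary.
 */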
/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		if (unlikely(!*ppages)) {
			/* XXX: Certain upper layer operations do
			 *	not provide receive buffer pages.
			 */
			*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -EAGAIN;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}
static int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
{
	*iptr++ = cpu_to_be32(mw->mw_handle);
	*iptr++ = cpu_to_be32(mw->mw_length);
	xdr_encode_hyper(iptr, mw->mw_offset);
}
static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mw);
	return 0;
}
static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mw);
	return 0;
}
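/* On the wire, each Read segment encoded above occupies six XDR words:
 *
 *	+---+----------+--------+--------+------------------+
 *	| 1 | position | handle | length | offset (2 words) |
 *	+---+----------+--------+--------+------------------+
 *
 * where the leading 1 is the list's item-present discriminator.
 */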
/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
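/* For example (illustrative): a Read chunk registered as two segments
 * at position 20 appears on the wire as
 *
 *	1, 20, H1, L1, O1, 1, 20, H2, L2, O2, 0
 *
 * where the trailing 0 terminating the list is added by the caller
 * via encode_item_not_present().
 */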
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_read_segment(xdr, mw, pos) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__, pos,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	return 0;
}
/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
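/* For example (illustrative): a Write chunk of three segments appears
 * on the wire as
 *
 *	1, 3, H1, L1, O1, H2, L2, O2, H3, L3, O3, 0
 *
 * where 3 is the segment count and the final 0 terminates the list.
 */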
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}
/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
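/* For example (illustrative): a Reply chunk of two segments appears
 * on the wire as
 *
 *	1, 2, H1, L1, O1, H2, L2, O2
 *
 * with no terminator word, since a Reply chunk is a single counted
 * array rather than a linked list.
 */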
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mw);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_push_mw(mw, &req->rl_registered);

		if (encode_rdma_segment(xdr, mw) < 0)
			return -EMSGSIZE;

		dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
			rqst->rq_task->tk_pid, __func__,
			mw->mw_length, (unsigned long long)mw->mw_offset,
			mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");

		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		nchunks++;
		nsegs -= mw->mw_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}
/**
 * rpcrdma_unmap_sges - DMA-unmap Send buffers
 * @ia: interface adapter (device)
 * @req: req with possibly some SGEs to be DMA unmapped
 *
 */
void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &req->rl_send_sge[2];
	for (count = req->rl_mapped_sges; count--; sge++)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);
	req->rl_mapped_sges = 0;
}
/* Prepare the RPC-over-RDMA header SGE.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = &req->rl_send_sge[0];

	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
		if (!__rpcrdma_dma_map_regbuf(ia, rb))
			return false;
		sge->addr = rdmab_addr(rb);
		sge->lkey = rdmab_lkey(rb);
	}
	sge->length = len;

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	req->rl_send_wr.num_sge++;
	return true;
}
/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = req->rl_send_sge;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		return false;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}
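	/* Example of the pad arithmetic above (illustrative): if the
	 * page list carried 4093 bytes of RPC data, xdr_write_pages()
	 * placed a 3-byte XDR pad at the front of the tail. A 7-byte
	 * tail (3 pad + 4 content) then maps as 4 bytes starting at
	 * page_base + 3.
	 */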
	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			req->rl_mapped_sges++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		req->rl_mapped_sges++;
	}

out:
	req->rl_send_wr.num_sge += sge_no;
	return true;

out_mapping_overflow:
	rpcrdma_unmap_sges(ia, req);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sges(ia, req);
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}
bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			  u32 hdrlen, struct xdr_buf *xdr,
			  enum rpcrdma_chunktype rtype)
{
	req->rl_send_wr.num_sge = 0;
	req->rl_mapped_sges = 0;

	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
		goto out_map;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
			goto out_map;

	return true;

out_map:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
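/* The Send SGE array this builds for a small RDMA_MSG request looks
 * roughly like this (a sketch, not a normative layout):
 *
 *	sge[0]    - RPC-over-RDMA transport header (always left mapped)
 *	sge[1]    - head iovec of the RPC call (always left mapped)
 *	sge[2..n] - page list pages, DMA-mapped for this Send only
 *	sge[n+1]  - tail iovec, if any, DMA-mapped for this Send only
 */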
/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if not enough pages are available for on-demand reply buffer,
 *	%-ENOBUFS if no MRs are available to register chunks,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
						RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
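	/* For illustration, the most common shapes this produces:
	 *
	 *	rtype = noch,   wtype = noch:    pure inline RDMA_MSG
	 *	rtype = readch, wtype = noch:    RDMA_MSG + Read list
	 *	rtype = noch,   wtype = writech: RDMA_MSG + Write list
	 *	rtype = noch,   wtype = replych: RDMA_MSG + Reply chunk
	 */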
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	dprintk("RPC: %5u %s: %s/%s: hdrlen %u\n",
		rqst->rq_task->tk_pid, __func__,
		transfertypes[rtype], transfertypes[wtype],
		xdr_stream_pos(xdr));

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
				       xdr_stream_pos(xdr),
				       &rqst->rq_snd_buf, rtype)) {
		ret = -EIO;
		goto out_err;
	}
	return 0;

out_err:
	if (ret != -ENOBUFS) {
		pr_err("rpcrdma: header marshaling failed (%d)\n", ret);
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}
/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
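/* A worked example (illustrative): for a reply whose rq_rcv_buf has a
 * 128-byte head and a 4096-byte page list, and a 1000-byte inline
 * message, the head iovec is simply pointed at the receive buffer (no
 * copy), the remaining 872 bytes are memcopied into the page list, and
 * the function returns 872.
 */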
/* Caller must guarantee @rep remains stable during this call.
 */
static void
rpcrdma_mark_remote_invalidation(struct list_head *mws,
				 struct rpcrdma_rep *rep)
{
	struct rpcrdma_mw *mw;

	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
		return;

	list_for_each_entry(mw, mws, mw_list)
		if (mw->mw_handle == rep->rr_inv_rkey) {
			mw->mw_flags = RPCRDMA_MW_F_RI;
			break; /* only one invalidated MR per RPC */
		}
}
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	ifdebug(FACILITY) {
		u64 offset;
		u32 handle;

		handle = be32_to_cpup(p++);
		*length = be32_to_cpup(p++);
		xdr_decode_hyper(p, &offset);
		dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
			__func__, *length, (unsigned long long)offset,
			handle);
	} else {
		*length = be32_to_cpup(p + 1);
	}

	return 0;
}
static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	dprintk("RPC: %s: segcount=%u, %u bytes\n",
		__func__, be32_to_cpup(p), *length);
	return 0;
}
/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}
/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}
static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}
static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}
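/* Example of the return value above (illustrative, assuming
 * xdr_align_size() rounds up to the next XDR quad): 520 inline RPC
 * bytes plus a 1023-byte Write chunk payload yield a Payload stream
 * of 520 + 1024 bytes.
 */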
static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}
static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpup(p), be32_to_cpup(p + 1));
		break;
	case err_chunk:
		dprintk("RPC: %5u: %s: server reports header decoding error\n",
			rqst->rq_task->tk_pid, __func__);
		break;
	default:
		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}
/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	unsigned long cwnd;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->recv_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->recv_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
		rqst->rq_task->tk_pid, __func__, be32_to_cpu(rep->rr_proc));
	r_xprt->rx_stats.bad_reply_count++;
	status = -EIO;
	goto out;
}
/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
	r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);

	rpcrdma_complete_rqst(rep);
}
/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	if (rep->rr_hdrbuf.head[0].iov_len == 0)
		goto out_badstatus;

	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);

	/* Fixed transport header fields */
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->recv_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	buf->rb_credits = credits;

	spin_unlock(&xprt->recv_lock);

	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;

	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(rep->rr_xid));

	if (list_empty(&req->rl_registered))
		rpcrdma_complete_rqst(rep);
	else
		queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(rep->rr_vers));
	goto repost;

/* The RPC transaction has already been terminated, or the header
 * is corrupt.
 */
out_norqst:
	spin_unlock(&xprt->recv_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
		__func__, be32_to_cpu(rep->rr_xid));
	goto repost;

out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}