/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc.  It is also where interfacing
 * to the Linux RPC framework lives.
 */
#include "xprt_rdma.h"

#include <linux/highmem.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
static const char transfertypes[][12] = {
	"inline",	/* no chunks */
	"read list",	/* some argument via rdma read */
	"*read list",	/* entire request via rdma read */
	"write list",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Reply chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}
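/* Worked example (a sketch; the real values come from the protocol
 * constants in the headers): assuming RPCRDMA_HDRLEN_MIN is 7 XDR
 * words (28 bytes), rpcrdma_readchunk_maxsz is 6 words, and
 * rpcrdma_segment_maxsz is 4 words, a transport allowing 8 data
 * segments yields 28 + (8 + 2) * 6 * 4 + 4 + 16 + 4 = 292 bytes of
 * worst-case Call header.
 */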
/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	maxsegs += 2;	/* segment for head and tail buffers */
	size += sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}
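/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload thresholds
 * @r_xprt: transport to initialize
 *
 * The largest RPC message that can travel inline in each direction is
 * the connection's negotiated inline size less the worst-case
 * transport header computed above.
 */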
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int maxsegs = ia->ri_max_segs;

	ia->ri_max_inline_write = cdata->inline_wsize -
				  rpcrdma_max_call_header_size(maxsegs);
	ia->ri_max_inline_read = cdata->inline_rsize -
				  rpcrdma_max_reply_header_size(maxsegs);
}
/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit.  If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}
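/* Illustrative example (a sketch; actual thresholds are negotiated
 * per connection): with a 4KB inline threshold, a GETATTR whose send
 * buffer totals a few hundred bytes goes inline, while an NFS WRITE
 * carrying a 16KB payload cannot, and is conveyed with a Read chunk
 * instead.
 */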
/* The client can't know how large the actual reply will be.  Thus it
 * plans for the largest possible reply for that particular ULP
 * operation.  If the maximum combined reply message size exceeds the
 * inline threshold, the client must provide a Write list or a Reply
 * chunk for this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}
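/* Note: the test is against rq_rcv_buf.buflen, the largest reply the
 * upper layer has provisioned for, not the size of any particular
 * reply; ri_max_inline_read was computed above in
 * rpcrdma_set_max_header_sizes().
 */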
/* Split @vec on page boundaries into SGEs.  FMR registers pages, not
 * a byte range.  Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}
/* Convert @xdrbuf into SGEs no larger than a page each.  As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */
static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		if (unlikely(!*ppages)) {
			/* XXX: Certain upper layer operations do
			 * not provide receive buffer pages.
			 */
			*ppages = alloc_page(GFP_ATOMIC);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks.  The upper
	 * layer provides space in the tail iovec that may be used for
	 * this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}
static int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}
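/* On the wire, each Read segment reserved above occupies six XDR
 * words: an item discriminator, the chunk position, and a plain RDMA
 * segment (handle, length, and a two-word offset).  A plain segment,
 * as used in Write and Reply chunks, is only the trailing four words.
 */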
/* Register and XDR encode the Read list.  Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
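/* For instance (illustrative values only), a two-segment Read chunk
 * whose payload begins at XDR position 20 is emitted as:
 *
 *    1, 20, H1, L1, O1,  1, 20, H2, L2, O2,  0
 *
 * where the trailing 0 terminates the Read list.
 */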
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   false, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}
/* Register and XDR encode the Write list.  Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}
/* Register and XDR encode the Reply chunk.  Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
						   true, &mr);
		if (IS_ERR(seg))
			goto out_maperr;
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;

out_maperr:
	if (PTR_ERR(seg) == -EAGAIN)
		xprt_wait_for_buffer_space(rqst->rq_task, NULL);
	return PTR_ERR(seg);
}
/**
 * rpcrdma_unmap_sendctx - DMA-unmap Send buffers
 * @sc: sendctx containing SGEs to unmap
 *
 */
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
	struct ib_sge *sge;
	unsigned int count;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	sge = &sc->sc_sges[2];
	for (count = sc->sc_unmap_count; count; ++sge, --count)
		ib_dma_unmap_page(ia->ri_device,
				  sge->addr, sge->length, DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
		smp_mb__after_atomic();
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
	}
}
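/* The wake_up_bit() above pairs with the wait in
 * rpcrdma_release_rqst(), which blocks RPC completion until every
 * temporarily-mapped Send SGE for the Call has been DMA-unmapped.
 */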
/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
				      sge->length, DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}
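/* The transport header regbuf stays DMA-mapped across requests (see
 * rpcrdma_unmap_sendctx above, which skips the first two SGEs); only
 * a dma_sync is needed for each Send.
 */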
/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
			 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_device *device = ia->ri_device;
	struct ib_sge *sge = sc->sc_sges;
	u32 lkey = ia->ri_pd->local_dma_lkey;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		goto out_regbuf;
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr = ib_dma_map_page(device, *ppages,
							   page_base, len,
							   DMA_TO_DEVICE);
			if (ib_dma_mapping_error(device, sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = lkey;

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr = ib_dma_map_page(device, page,
						   page_base, len,
						   DMA_TO_DEVICE);
		if (ib_dma_mapping_error(device, sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = lkey;
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_unmap_sendctx(sc);
	pr_err("rpcrdma: Send mapping error\n");
	return false;
}
/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
	if (!req->rl_sendctx)
		return -EAGAIN;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
			return -EIO;

	return 0;
}
/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
						RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_defer_recovery(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	} else {
		ret = encode_item_not_present(xdr);
		if (ret)
			goto out_err;
	}
	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	} else {
		ret = encode_item_not_present(xdr);
		if (ret)
			goto out_err;
	}
	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	r_xprt->rx_stats.failed_marshal_count++;
	return ret;
}
/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);

			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		xprt_disconnect_done(&r_xprt->rx_xprt);
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}
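/* A Write chunk is thus decoded as a counted array of RDMA segments:
 * for example (illustrative values), a one-segment chunk arrives as
 * segcount = 1 followed by handle, length, and a 64-bit offset, and
 * *length returns the sum of the segment lengths.
 */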
/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}
static noinline int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}
static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}
static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %5u: %s: server reports version error (%u-%u)\n",
			rqst->rq_task->tk_pid, __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)));
		break;
	case err_chunk:
		dprintk("RPC: %5u: %s: server reports header decoding error\n",
			rqst->rq_task->tk_pid, __func__);
		break;
	default:
		dprintk("RPC: %5u: %s: server reports unrecognized error %d\n",
			rqst->rq_task->tk_pid, __func__, be32_to_cpup(p));
		break;
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}
/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	unsigned long cwnd;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->recv_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->recv_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	status = -EIO;
	goto out;
}
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
						    &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}
/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}
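/* When the Receive completion carries IB_WC_WITH_INVALIDATE, the
 * server has already invalidated the matching rkey; ro_reminv removes
 * those MRs from rl_registered so that rpcrdma_release_rqst() does not
 * invalidate them a second time.
 */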
/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	if (rep->rr_hdrbuf.head[0].iov_len == 0)
		goto out_badstatus;

	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base);

	/* Fixed transport header fields */
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->recv_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	buf->rb_credits = credits;

	spin_unlock(&xprt->recv_lock);

	req = rpcr_to_rdmar(rqst);
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);

	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_badstatus:
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto repost;

/* The RPC transaction has already been terminated, or the header
 * is corrupt.
 */
out_norqst:
	spin_unlock(&xprt->recv_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto repost;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

/* If no pending RPC transaction was matched, post a replacement
 * receive buffer before returning.
 */
repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
		rpcrdma_recv_buffer_put(rep);
}