// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include <linux/highmem.h>

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Returns size of largest RPC-over-RDMA header in a Call message
 *
 * The largest Call header contains a full-size Read list and a
 * minimal Reply chunk.
 */
static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Read list size */
	size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);

	/* Minimal Read chunk size */
	size += sizeof(__be32);	/* segment count */
	size += rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max call header size = %u\n",
		__func__, size);
	return size;
}

/* Returns size of largest RPC-over-RDMA header in a Reply message
 *
 * There is only one Write list or one Reply chunk per Reply
 * message.  The larger list is the Write list.
 */
static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
{
	unsigned int size;

	/* Fixed header fields and list discriminators */
	size = RPCRDMA_HDRLEN_MIN;

	/* Maximum Write list size */
	size = sizeof(__be32);	/* segment count */
	size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
	size += sizeof(__be32);	/* list discriminator */

	dprintk("RPC: %s: max reply header size = %u\n",
		__func__, size);
	return size;
}

/**
 * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
 * @r_xprt: transport instance to initialize
 *
 * The max_inline fields contain the maximum size of an RPC message
 * so the marshaling code doesn't have to repeat this calculation
 * for every RPC.
 */
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
{
	unsigned int maxsegs = r_xprt->rx_ia.ri_max_segs;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	ep->rep_max_inline_send =
		ep->rep_inline_send - rpcrdma_max_call_header_size(maxsegs);
	ep->rep_max_inline_recv =
		ep->rep_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
}

/* The client can send a request inline as long as the RPCRDMA header
 * plus the RPC call fit under the transport's inline limit. If the
 * combined call message size exceeds that limit, the client must use
 * a Read chunk for this operation.
 *
 * A Read chunk is also required if sending the RPC call inline would
 * exceed this device's max_sge limit.
 */
static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
				struct rpc_rqst *rqst)
{
	struct xdr_buf *xdr = &rqst->rq_snd_buf;
	unsigned int count, remaining, offset;

	if (xdr->len > r_xprt->rx_ep.rep_max_inline_send)
		return false;

	if (xdr->page_len) {
		remaining = xdr->page_len;
		offset = offset_in_page(xdr->page_base);
		count = RPCRDMA_MIN_SEND_SGES;
		while (remaining) {
			remaining -= min_t(unsigned int,
					   PAGE_SIZE - offset, remaining);
			offset = 0;
			if (++count > r_xprt->rx_ia.ri_max_send_sges)
				return false;
		}
	}

	return true;
}

/* The client can't know how large the actual reply will be. Thus it
 * plans for the largest possible reply for that particular ULP
 * operation. If the maximum combined reply message size exceeds that
 * limit, the client must provide a write list or a reply chunk for
 * this request.
 */
static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
				   struct rpc_rqst *rqst)
{
	return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep.rep_max_inline_recv;
}

/* The client is required to provide a Reply chunk if the maximum
 * size of the non-payload part of the RPC Reply is larger than
 * the inline threshold.
 */
static bool
rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
			  const struct rpc_rqst *rqst)
{
	const struct xdr_buf *buf = &rqst->rq_rcv_buf;

	return (buf->head[0].iov_len + buf->tail[0].iov_len) <
		r_xprt->rx_ep.rep_max_inline_recv;
}

/* Split @vec on page boundaries into SGEs. FMR registers pages, not
 * a byte range. Other modes coalesce these SGEs into a single MR
 * when they can.
 *
 * Returns pointer to next available SGE, and bumps the total number
 * of SGEs consumed.
 */
static struct rpcrdma_mr_seg *
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
		     unsigned int *n)
{
	u32 remaining, page_offset;
	char *base;

	base = vec->iov_base;
	page_offset = offset_in_page(base);
	remaining = vec->iov_len;
	while (remaining) {
		seg->mr_page = NULL;
		seg->mr_offset = base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
		remaining -= seg->mr_len;
		base += seg->mr_len;
		++seg;
		++(*n);
		page_offset = 0;
	}
	return seg;
}

/* Convert @xdrbuf into SGEs no larger than a page each. As they
 * are registered, these SGEs are then coalesced into RDMA segments
 * when the selected memreg mode supports it.
 *
 * Returns positive number of SGEs consumed, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
		     unsigned int pos, enum rpcrdma_chunktype type,
		     struct rpcrdma_mr_seg *seg)
{
	unsigned long page_base;
	unsigned int len, n;
	struct page **ppages;

	n = 0;
	if (pos == 0)
		seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = offset_in_page(xdrbuf->page_base);
	while (len) {
		/* ACL likes to be lazy in allocating pages - ACLs
		 * are small by default but can get huge.
		 */
		if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
			if (!*ppages)
				*ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
			if (!*ppages)
				return -ENOBUFS;
		}
		seg->mr_page = *ppages;
		seg->mr_offset = (char *)page_base;
		seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		len -= seg->mr_len;
		++ppages;
		++seg;
		++n;
		page_base = 0;
	}

	/* When encoding a Read chunk, the tail iovec contains an
	 * XDR pad and may be omitted.
	 */
	if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	/* When encoding a Write chunk, some servers need to see an
	 * extra segment for non-XDR-aligned Write chunks. The upper
	 * layer provides space in the tail iovec that may be used
	 * for this purpose.
	 */
	if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
		goto out;

	if (xdrbuf->tail[0].iov_len)
		seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);

out:
	if (unlikely(n > RPCRDMA_MAX_SEGS))
		return -EIO;
	return n;
}

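/* The two helpers below emit the XDR discriminator words that
 * introduce or terminate the optional chunk lists in the transport
 * header: xdr_one means "an item follows", xdr_zero means "no more
 * items" (or an empty list).
 */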
static inline int
encode_item_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_one;
	return 0;
}

static inline int
encode_item_not_present(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p = xdr_zero;
	return 0;
}

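/* An RDMA segment is encoded on the wire as an HLOO triplet: a
 * 32-bit handle, a 32-bit length, and a 64-bit offset, four XDR
 * words in all. A Read segment additionally carries a "present"
 * discriminator and a 32-bit position, six XDR words in all.
 */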
static void
xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
	*iptr++ = cpu_to_be32(mr->mr_handle);
	*iptr++ = cpu_to_be32(mr->mr_length);
	xdr_encode_hyper(iptr, mr->mr_offset);
}

static int
encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	xdr_encode_rdma_segment(p, mr);
	return 0;
}

static int
encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
		    u32 position)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 6 * sizeof(*p));
	if (unlikely(!p))
		return -EMSGSIZE;

	*p++ = xdr_one;			/* Item present */
	*p++ = cpu_to_be32(position);
	xdr_encode_rdma_segment(p, mr);
	return 0;
}

/* Register and XDR encode the Read list. Supports encoding a list of read
 * segments that belong to a single read chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single @pos value is currently supported.
 */
static noinline int
rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			 struct rpc_rqst *rqst, enum rpcrdma_chunktype rtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	unsigned int pos;
	int nsegs;

	pos = rqst->rq_snd_buf.head[0].iov_len;
	if (rtype == rpcrdma_areadch)
		pos = 0;
	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
				     rtype, seg);
	if (nsegs < 0)
		return nsegs;

	do {
		seg = frwr_map(r_xprt, seg, nsegs, false, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_read_segment(xdr, mr, pos) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
		r_xprt->rx_stats.read_chunk_count++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	return 0;
}

/* Register and XDR encode the Write list. Supports encoding a list
 * containing one array of plain segments that belong to a single
 * write chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 *
 * Only a single Write chunk is currently supported.
 */
static noinline int
rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			  struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
				     rqst->rq_rcv_buf.head[0].iov_len,
				     wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.write_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in this Write chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/* Register and XDR encode the Reply chunk. Supports encoding an array
 * of plain segments that belong to a single write (reply) chunk.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns zero on success, or a negative errno if a failure occurred.
 * @xdr is advanced to the next position in the stream.
 */
static noinline int
rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
			   struct rpc_rqst *rqst, enum rpcrdma_chunktype wtype)
{
	struct xdr_stream *xdr = &req->rl_stream;
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mr *mr;
	int nsegs, nchunks;
	__be32 *segcount;

	seg = req->rl_segments;
	nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
	if (nsegs < 0)
		return nsegs;

	if (encode_item_present(xdr) < 0)
		return -EMSGSIZE;
	segcount = xdr_reserve_space(xdr, sizeof(*segcount));
	if (unlikely(!segcount))
		return -EMSGSIZE;
	/* Actual value encoded below */

	nchunks = 0;
	do {
		seg = frwr_map(r_xprt, seg, nsegs, true, rqst->rq_xid, &mr);
		if (IS_ERR(seg))
			return PTR_ERR(seg);
		rpcrdma_mr_push(mr, &req->rl_registered);

		if (encode_rdma_segment(xdr, mr) < 0)
			return -EMSGSIZE;

		trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
		r_xprt->rx_stats.reply_chunk_count++;
		r_xprt->rx_stats.total_rdma_request += mr->mr_length;
		nchunks++;
		nsegs -= mr->mr_nents;
	} while (nsegs);

	/* Update count of segments in the Reply chunk */
	*segcount = cpu_to_be32(nchunks);

	return 0;
}

/**
 * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
 * @sc: sendctx containing SGEs to unmap
 *
 */
void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
{
	struct ib_sge *sge;

	/* The first two SGEs contain the transport header and
	 * the inline buffer. These are always left mapped so
	 * they can be cheaply re-used.
	 */
	for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
	     ++sge, --sc->sc_unmap_count)
		ib_dma_unmap_page(sc->sc_device, sge->addr, sge->length,
				  DMA_TO_DEVICE);

	if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES,
			       &sc->sc_req->rl_flags))
		wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
}

/* Prepare an SGE for the RPC-over-RDMA transport header.
 */
static bool rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
				    struct rpcrdma_req *req, u32 len)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
	struct ib_sge *sge = sc->sc_sges;

	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);

	ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
				      DMA_TO_DEVICE);
	sc->sc_wr.num_sge++;
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;
}

/* Prepare the Send SGEs. The head and tail iovec, and each entry
 * in the page list, gets its own SGE.
 */
static bool rpcrdma_prepare_msg_sges(struct rpcrdma_xprt *r_xprt,
				     struct rpcrdma_req *req,
				     struct xdr_buf *xdr,
				     enum rpcrdma_chunktype rtype)
{
	struct rpcrdma_sendctx *sc = req->rl_sendctx;
	unsigned int sge_no, page_base, len, remaining;
	struct rpcrdma_regbuf *rb = req->rl_sendbuf;
	struct ib_sge *sge = sc->sc_sges;
	struct page *page, **ppages;

	/* The head iovec is straightforward, as it is already
	 * DMA-mapped. Sync the content that has changed.
	 */
	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
		goto out_regbuf;
	sc->sc_device = rdmab_device(rb);
	sge_no = 1;
	sge[sge_no].addr = rdmab_addr(rb);
	sge[sge_no].length = xdr->head[0].iov_len;
	sge[sge_no].lkey = rdmab_lkey(rb);
	ib_dma_sync_single_for_device(rdmab_device(rb), sge[sge_no].addr,
				      sge[sge_no].length, DMA_TO_DEVICE);

	/* If there is a Read chunk, the page list is being handled
	 * via explicit RDMA, and thus is skipped here. However, the
	 * tail iovec may include an XDR pad for the page list, as
	 * well as additional content, and may not reside in the
	 * same page as the head iovec.
	 */
	if (rtype == rpcrdma_readch) {
		len = xdr->tail[0].iov_len;

		/* Do not include the tail if it is only an XDR pad */
		if (len < 4)
			goto out;

		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);

		/* If the content in the page list is an odd length,
		 * xdr_write_pages() has added a pad at the beginning
		 * of the tail iovec. Force the tail's non-pad content
		 * to land at the next XDR position in the Send message.
		 */
		page_base += len & 3;
		len -= len & 3;
		goto map_tail;
	}

	/* If there is a page list present, temporarily DMA map
	 * and prepare an SGE for each page to be sent.
	 */
	if (xdr->page_len) {
		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		page_base = offset_in_page(xdr->page_base);
		remaining = xdr->page_len;
		while (remaining) {
			sge_no++;
			if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
				goto out_mapping_overflow;

			len = min_t(u32, PAGE_SIZE - page_base, remaining);
			sge[sge_no].addr =
				ib_dma_map_page(rdmab_device(rb), *ppages,
						page_base, len, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdmab_device(rb),
						 sge[sge_no].addr))
				goto out_mapping_err;
			sge[sge_no].length = len;
			sge[sge_no].lkey = rdmab_lkey(rb);

			sc->sc_unmap_count++;
			ppages++;
			remaining -= len;
			page_base = 0;
		}
	}

	/* The tail iovec is not always constructed in the same
	 * page where the head iovec resides (see, for example,
	 * gss_wrap_req_priv). To neatly accommodate that case,
	 * DMA map it separately.
	 */
	if (xdr->tail[0].iov_len) {
		page = virt_to_page(xdr->tail[0].iov_base);
		page_base = offset_in_page(xdr->tail[0].iov_base);
		len = xdr->tail[0].iov_len;

map_tail:
		sge_no++;
		sge[sge_no].addr =
			ib_dma_map_page(rdmab_device(rb), page, page_base, len,
					DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdmab_device(rb), sge[sge_no].addr))
			goto out_mapping_err;
		sge[sge_no].length = len;
		sge[sge_no].lkey = rdmab_lkey(rb);
		sc->sc_unmap_count++;
	}

out:
	sc->sc_wr.num_sge += sge_no;
	if (sc->sc_unmap_count)
		__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
	return true;

out_regbuf:
	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
	return false;

out_mapping_overflow:
	rpcrdma_sendctx_unmap(sc);
	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
	return false;

out_mapping_err:
	rpcrdma_sendctx_unmap(sc);
	trace_xprtrdma_dma_maperr(sge[sge_no].addr);
	return false;
}

/**
 * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
 * @r_xprt: controlling transport
 * @req: context of RPC Call being marshalled
 * @hdrlen: size of transport header, in bytes
 * @xdr: xdr_buf containing RPC Call
 * @rtype: chunk type being encoded
 *
 * Returns 0 on success; otherwise a negative errno is returned.
 */
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
			  struct rpcrdma_req *req, u32 hdrlen,
			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
	req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
	if (!req->rl_sendctx)
		return -EAGAIN;
	req->rl_sendctx->sc_wr.num_sge = 0;
	req->rl_sendctx->sc_unmap_count = 0;
	req->rl_sendctx->sc_req = req;
	__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);

	if (!rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen))
		return -EIO;

	if (rtype != rpcrdma_areadch)
		if (!rpcrdma_prepare_msg_sges(r_xprt, req, xdr, rtype))
			return -EIO;

	return 0;
}

/**
 * rpcrdma_marshal_req - Marshal and send one RPC request
 * @r_xprt: controlling transport
 * @rqst: RPC request to be marshaled
 *
 * For the RPC in "rqst", this function:
 *  - Chooses the transfer mode (eg., RDMA_MSG or RDMA_NOMSG)
 *  - Registers Read, Write, and Reply chunks
 *  - Constructs the transport header
 *  - Posts a Send WR to send the transport header and request
 *
 * Returns:
 *	%0 if the RPC was sent successfully,
 *	%-ENOTCONN if the connection was lost,
 *	%-EAGAIN if the caller should call again with the same arguments,
 *	%-ENOBUFS if the caller should call again after a delay,
 *	%-EMSGSIZE if the transport header is too small,
 *	%-EIO if a permanent problem occurred while marshaling.
 */
int
rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct xdr_stream *xdr = &req->rl_stream;
	enum rpcrdma_chunktype rtype, wtype;
	bool ddp_allowed;
	__be32 *p;
	int ret;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
			rqst);

	/* Fixed header fields */
	ret = -EMSGSIZE;
	p = xdr_reserve_space(xdr, 4 * sizeof(*p));
	if (!p)
		goto out_err;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);

	/* When the ULP employs a GSS flavor that guarantees integrity
	 * or privacy, direct data placement of individual data items
	 * is not allowed.
	 */
	ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
			RPCAUTH_AUTH_DATATOUCH);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large read ops return data as write chunk(s), header as
	 *   inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rpcrdma_results_inline(r_xprt, rqst))
		wtype = rpcrdma_noch;
	else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
		 rpcrdma_nonpayload_inline(r_xprt, rqst))
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(r_xprt, rqst)) {
		*p++ = rdma_msg;
		rtype = rpcrdma_noch;
	} else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		*p++ = rdma_msg;
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		*p++ = rdma_nomsg;
		rtype = rpcrdma_areadch;
	}

	/* If this is a retransmit, discard previously registered
	 * chunks. Very likely the connection has been replaced,
	 * so these registrations are invalid and unusable.
	 */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_recycle(mr);
	}

	/* This implementation supports the following combinations
	 * of chunk lists in one RPC-over-RDMA Call message:
	 *
	 *   - Read list
	 *   - Write list
	 *   - Reply chunk
	 *   - Read list + Reply chunk
	 *
	 * It might not yet support the following combinations:
	 *
	 *   - Read list + Write list
	 *
	 * It does not support the following combinations:
	 *
	 *   - Write list + Reply chunk
	 *   - Read list + Write list + Reply chunk
	 *
	 * This implementation supports only a single chunk in each
	 * Read or Write list. Thus for example the client cannot
	 * send a Call message with a Position Zero Read chunk and a
	 * regular Read chunk at the same time.
	 */
	if (rtype != rpcrdma_noch) {
		ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype == rpcrdma_writech) {
		ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
		if (ret)
			goto out_err;
	}
	ret = encode_item_not_present(xdr);
	if (ret)
		goto out_err;

	if (wtype != rpcrdma_replych)
		ret = encode_item_not_present(xdr);
	else
		ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
	if (ret)
		goto out_err;

	trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);

	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
					&rqst->rq_snd_buf, rtype);
	if (ret)
		goto out_err;
	return 0;

out_err:
	trace_xprtrdma_marshal_failed(rqst, ret);
	switch (ret) {
	case -EAGAIN:
		xprt_wait_for_buffer_space(rqst->rq_xprt);
		break;
	case -ENOBUFS:
		break;
	default:
		r_xprt->rx_stats.failed_marshal_count++;
	}
	return ret;
}

/**
 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
 * @rqst: controlling RPC request
 * @srcp: points to RPC message payload in receive buffer
 * @copy_len: remaining length of receive buffer content
 * @pad: Write chunk pad bytes needed (zero for pure inline)
 *
 * The upper layer has set the maximum number of bytes it can
 * receive in each component of rq_rcv_buf. These values are set in
 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
 *
 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
 * many cases this function simply updates iov_base pointers in
 * rq_rcv_buf to point directly to the received reply data, to
 * avoid copying reply data.
 *
 * Returns the count of bytes which had to be memcopied.
 */
static unsigned long
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	unsigned long fixup_copy_count;
	int i, npages, curlen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* The head iovec is redirected to the RPC reply message
	 * in the receive buffer, to avoid a memcopy.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	rqst->rq_private_buf.head[0].iov_base = srcp;

	/* The contents of the receive buffer that follow
	 * head.iov_len bytes are copied into the page list.
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len)
		curlen = copy_len;
	trace_xprtrdma_fixup(rqst, copy_len, curlen);
	srcp += curlen;
	copy_len -= curlen;

	ppages = rqst->rq_rcv_buf.pages +
		(rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
	page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
	fixup_copy_count = 0;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		int pagelist_len;

		pagelist_len = rqst->rq_rcv_buf.page_len;
		if (pagelist_len > copy_len)
			pagelist_len = copy_len;
		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > pagelist_len)
				curlen = pagelist_len;

			trace_xprtrdma_fixup_pg(rqst, i, srcp,
						copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			fixup_copy_count += curlen;
			pagelist_len -= curlen;
			if (!pagelist_len)
				break;
			page_base = 0;
		}

		/* Implicit padding for the last segment in a Write
		 * chunk is inserted inline at the front of the tail
		 * iovec. The upper layer ignores the content of
		 * the pad. Simply ensure inline content in the tail
		 * that follows the Write chunk is properly aligned.
		 */
		if (pad)
			srcp -= pad;
	}

	/* The tail iovec is redirected to the remaining data
	 * in the receive buffer, to avoid a memcopy.
	 */
	if (copy_len || pad) {
		rqst->rq_rcv_buf.tail[0].iov_base = srcp;
		rqst->rq_private_buf.tail[0].iov_base = srcp;
	}

	return fixup_copy_count;
}

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	if (rep->rr_proc != rdma_msg)
		return false;

	/* Peek at stream contents without advancing. */
	p = xdr_inline_decode(xdr, 0);

	/* Chunk lists */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* RPC header */
	if (*p++ != rep->rr_xid)
		return false;
	if (*p != cpu_to_be32(RPC_CALL))
		return false;

	/* Now that we are sure this is a backchannel call,
	 * advance to the RPC header.
	 */
	p = xdr_inline_decode(xdr, 3 * sizeof(*p));
	if (unlikely(!p))
		goto out_short;

	rpcrdma_bc_receive_call(r_xprt, rep);
	return true;

out_short:
	pr_warn("RPC/RDMA short backward direction call\n");
	return true;
}
#else	/* CONFIG_SUNRPC_BACKCHANNEL */
{
	return false;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

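/* The decoders below walk the chunk lists in a received transport
 * header. Write chunks and the Reply chunk are summed into a byte
 * count so the reply handler knows how much data the server moved
 * via RDMA Write; a Read list in a reply is treated as an error.
 */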
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
	u32 handle;
	u64 offset;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4 * sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	handle = be32_to_cpup(p++);
	*length = be32_to_cpup(p++);
	xdr_decode_hyper(p, &offset);

	trace_xprtrdma_decode_seg(handle, *length, offset);
	return 0;
}

static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
{
	u32 segcount, seglength;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	segcount = be32_to_cpup(p);
	while (segcount--) {
		if (decode_rdma_segment(xdr, &seglength))
			return -EIO;
		*length += seglength;
	}

	return 0;
}

/* In RPC-over-RDMA Version One replies, a Read list is never
 * expected. This decoder is a stub that returns an error if
 * a Read list is present.
 */
static int decode_read_list(struct xdr_stream *xdr)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;
	if (unlikely(*p != xdr_zero))
		return -EIO;
	return 0;
}

/* Supports only one Write chunk in the Write list
 */
static int decode_write_list(struct xdr_stream *xdr, u32 *length)
{
	u32 chunklen;
	bool first;
	__be32 *p;

	*length = 0;
	first = true;
	do {
		p = xdr_inline_decode(xdr, sizeof(*p));
		if (unlikely(!p))
			return -EIO;
		if (*p == xdr_zero)
			break;
		if (!first)
			return -EIO;

		if (decode_write_chunk(xdr, &chunklen))
			return -EIO;
		*length += chunklen;
		first = false;
	} while (true);
	return 0;
}

static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	*length = 0;
	if (*p != xdr_zero)
		if (decode_write_chunk(xdr, length))
			return -EIO;
	return 0;
}

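/* Handle an RDMA_MSG reply: the RPC reply message itself follows the
 * transport header inline, and any bulk payload has already been
 * placed in the receive buffer via the Write list.
 */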
static int
rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		   struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk, rpclen;
	char *base;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_MSG sanity checks */
	if (unlikely(replychunk))
		return -EIO;

	/* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
	base = (char *)xdr_inline_decode(xdr, 0);
	rpclen = xdr_stream_remaining(xdr);
	r_xprt->rx_stats.fixup_copy_count +=
		rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);

	r_xprt->rx_stats.total_rdma_reply += writelist;
	return rpclen + xdr_align_size(writelist);
}

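/* Handle an RDMA_NOMSG reply: no RPC message follows the transport
 * header; the entire reply was conveyed in the Reply chunk.
 */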
static noinline int
rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	u32 writelist, replychunk;

	/* Decode the chunk lists */
	if (decode_read_list(xdr))
		return -EIO;
	if (decode_write_list(xdr, &writelist))
		return -EIO;
	if (decode_reply_chunk(xdr, &replychunk))
		return -EIO;

	/* RDMA_NOMSG sanity checks */
	if (unlikely(writelist))
		return -EIO;
	if (unlikely(!replychunk))
		return -EIO;

	/* Reply chunk buffer already is the reply vector */
	r_xprt->rx_stats.total_rdma_reply += replychunk;
	return replychunk;
}

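/* Handle an RDMA_ERROR reply: the server could not parse the Call's
 * transport header, or it does not support this protocol version.
 */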
static noinline int
rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
		     struct rpc_rqst *rqst)
{
	struct xdr_stream *xdr = &rep->rr_stream;
	__be32 *p;

	p = xdr_inline_decode(xdr, sizeof(*p));
	if (unlikely(!p))
		return -EIO;

	switch (*p) {
	case err_vers:
		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
		if (!p)
			break;
		dprintk("RPC: %s: server reports "
			"version error (%u-%u), xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
			be32_to_cpu(rep->rr_xid));
		break;
	case err_chunk:
		dprintk("RPC: %s: server reports "
			"header decoding error, xid %08x\n", __func__,
			be32_to_cpu(rep->rr_xid));
		break;
	default:
		dprintk("RPC: %s: server reports "
			"unrecognized error %d, xid %08x\n", __func__,
			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
	}

	r_xprt->rx_stats.bad_reply_count++;
	return -EREMOTEIO;
}

/* Perform XID lookup, reconstruction of the RPC reply, and
 * RPC completion while holding the transport lock to ensure
 * the rep, rqst, and rq_task pointers remain stable.
 */
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpc_rqst *rqst = rep->rr_rqst;
	int status;

	xprt->reestablish_timeout = 0;

	switch (rep->rr_proc) {
	case rdma_msg:
		status = rpcrdma_decode_msg(r_xprt, rep, rqst);
		break;
	case rdma_nomsg:
		status = rpcrdma_decode_nomsg(r_xprt, rep);
		break;
	case rdma_error:
		status = rpcrdma_decode_error(r_xprt, rep, rqst);
		break;
	default:
		status = -EIO;
	}
	if (status < 0)
		goto out_badheader;

out:
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(rqst->rq_task, status);
	xprt_unpin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);
	return;

/* If the incoming reply terminated a pending RPC, the next
 * RPC call will post a replacement receive buffer as it is
 * being marshaled.
 */
out_badheader:
	trace_xprtrdma_reply_hdr(rep);
	r_xprt->rx_stats.bad_reply_count++;
	goto out;
}

void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	/* Invalidate and unmap the data payloads before waking
	 * the waiting application. This guarantees the memory
	 * regions are properly fenced from the server before the
	 * application accesses the data. It also ensures proper
	 * send flow control: waking the next RPC waits until this
	 * RPC has relinquished all its Send Queue entries.
	 */
	if (!list_empty(&req->rl_registered))
		frwr_unmap_sync(r_xprt, &req->rl_registered);

	/* Ensure that any DMA mapped pages associated with
	 * the Send of the RPC Call have been unmapped before
	 * allowing the RPC to complete. This protects argument
	 * memory not controlled by the RPC client from being
	 * re-used before we're done with it.
	 */
	if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		r_xprt->rx_stats.reply_waits_for_send++;
		out_of_line_wait_on_bit(&req->rl_flags,
					RPCRDMA_REQ_F_TX_RESOURCES,
					bit_wait,
					TASK_UNINTERRUPTIBLE);
	}
}

/* Reply handling runs in the poll worker thread. Anything that
 * might wait is deferred to a separate workqueue.
 */
void rpcrdma_deferred_completion(struct work_struct *work)
{
	struct rpcrdma_rep *rep =
			container_of(work, struct rpcrdma_rep, rr_work);
	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;

	trace_xprtrdma_defer_cmp(rep);
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		frwr_reminv(rep, &req->rl_registered);
	rpcrdma_release_rqst(r_xprt, req);
	rpcrdma_complete_rqst(rep);
}

/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	u32 credits;
	__be32 *p;

	/* Fixed transport header fields */
	xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
			rep->rr_hdrbuf.head[0].iov_base, NULL);
	p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
	if (unlikely(!p))
		goto out_shortreply;
	rep->rr_xid = *p++;
	rep->rr_vers = *p++;
	credits = be32_to_cpu(*p++);
	rep->rr_proc = *p++;

	if (rep->rr_vers != rpcrdma_version)
		goto out_badversion;

	if (rpcrdma_is_bcall(r_xprt, rep))
		return;

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 */
	spin_lock(&xprt->queue_lock);
	rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
	if (!rqst)
		goto out_norqst;
	xprt_pin_rqst(rqst);
	spin_unlock(&xprt->queue_lock);

	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buf->rb_max_requests)
		credits = buf->rb_max_requests;
	if (buf->rb_credits != credits) {
		spin_lock_bh(&xprt->transport_lock);
		buf->rb_credits = credits;
		xprt->cwnd = credits << RPC_CWNDSHIFT;
		spin_unlock_bh(&xprt->transport_lock);
	}

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
		rpcrdma_recv_buffer_put(req->rl_reply);
	}
	req->rl_reply = rep;
	rep->rr_rqst = rqst;
	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);

	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
	queue_work(buf->rb_completion_wq, &rep->rr_work);
	return;

out_badversion:
	trace_xprtrdma_reply_vers(rep);
	goto out;

out_norqst:
	spin_unlock(&xprt->queue_lock);
	trace_xprtrdma_reply_rqst(rep);
	goto out;

out_shortreply:
	trace_xprtrdma_reply_short(rep);

out:
	rpcrdma_recv_buffer_put(rep);
}