/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
41 | ||
42 | #ifndef _LINUX_SUNRPC_XPRT_RDMA_H | |
43 | #define _LINUX_SUNRPC_XPRT_RDMA_H | |
44 | ||
45 | #include <linux/wait.h> /* wait_queue_head_t, etc */ | |
46 | #include <linux/spinlock.h> /* spinlock_t, etc */ | |
0ab11523 CL |
47 | #include <linux/atomic.h> /* atomic_t, etc */ |
48 | #include <linux/kref.h> /* struct kref */ | |
254f91e2 | 49 | #include <linux/workqueue.h> /* struct work_struct */ |
f58851e6 TT |
50 | |
51 | #include <rdma/rdma_cm.h> /* RDMA connection api */ | |
52 | #include <rdma/ib_verbs.h> /* RDMA verbs api */ | |
53 | ||
54 | #include <linux/sunrpc/clnt.h> /* rpc_xprt */ | |
55 | #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */ | |
56 | #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */ | |
57 | ||
5675add3 TT |
58 | #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */ |
59 | #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */ | |
60 | ||
5d252f90 CL |
61 | #define RPCRDMA_BIND_TO (60U * HZ) |
62 | #define RPCRDMA_INIT_REEST_TO (5U * HZ) | |
63 | #define RPCRDMA_MAX_REEST_TO (30U * HZ) | |
64 | #define RPCRDMA_IDLE_DISC_TO (5U * 60 * HZ) | |
65 | ||
f58851e6 TT |
66 | /* |
67 | * Interface Adapter -- one per transport instance | |
68 | */ | |
69 | struct rpcrdma_ia { | |
70 | struct rdma_cm_id *ri_id; | |
71 | struct ib_pd *ri_pd; | |
f58851e6 | 72 | int ri_async_rc; |
87cfb9a0 | 73 | unsigned int ri_max_segs; |
ce5b3717 | 74 | unsigned int ri_max_frwr_depth; |
16f906d6 | 75 | unsigned int ri_max_send_sges; |
b5f0afbe | 76 | bool ri_implicit_roundup; |
5e9fc6a0 | 77 | enum ib_mr_type ri_mrtype; |
bebd0318 | 78 | unsigned long ri_flags; |
f19bd0bb CL |
79 | struct completion ri_done; |
80 | struct completion ri_remove_done; | |
f58851e6 TT |
81 | }; |
82 | ||
bebd0318 CL |
83 | enum { |
84 | RPCRDMA_IAF_REMOVING = 0, | |
85 | }; | |
86 | ||
f58851e6 TT |
87 | /* |
88 | * RDMA Endpoint -- one per transport instance | |
89 | */ | |
90 | ||
91 | struct rpcrdma_ep { | |
ae72950a CL |
92 | unsigned int rep_send_count; |
93 | unsigned int rep_send_batch; | |
94087e97 CL |
94 | unsigned int rep_max_inline_send; |
95 | unsigned int rep_max_inline_recv; | |
f58851e6 | 96 | int rep_connected; |
f58851e6 TT |
97 | struct ib_qp_init_attr rep_attr; |
98 | wait_queue_head_t rep_connect_wait; | |
87cfb9a0 | 99 | struct rpcrdma_connect_private rep_cm_private; |
f58851e6 | 100 | struct rdma_conn_param rep_remote_cma; |
86c4ccd9 | 101 | unsigned int rep_max_requests; /* set by /proc */ |
94087e97 CL |
102 | unsigned int rep_inline_send; /* negotiated */ |
103 | unsigned int rep_inline_recv; /* negotiated */ | |
6ceea368 | 104 | int rep_receive_count; |
f58851e6 TT |
105 | }; |
106 | ||
124fa17d CL |
107 | /* Pre-allocate extra Work Requests for handling backward receives |
108 | * and sends. This is a fixed value because the Work Queues are | |
4ba02e8d CL |
109 | * allocated when the forward channel is set up, long before the |
110 | * backchannel is provisioned. This value is two times | |
111 | * NFS4_DEF_CB_SLOT_TABLE_SIZE. | |
124fa17d CL |
112 | */ |
113 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | |
4ba02e8d | 114 | #define RPCRDMA_BACKWARD_WRS (32) |
124fa17d | 115 | #else |
4ba02e8d | 116 | #define RPCRDMA_BACKWARD_WRS (0) |
124fa17d CL |
117 | #endif |
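
/* For reference: assuming NFS4_DEF_CB_SLOT_TABLE_SIZE is 16 (its usual
 * definition), "two times" that is 2 * 16 = 32, matching the value used
 * when the backchannel is configured in.
 */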
118 | ||
9128c3e7 | 119 | /* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV |
9128c3e7 CL |
120 | */ |
121 | ||
122 | struct rpcrdma_regbuf { | |
9128c3e7 | 123 | struct ib_sge rg_iov; |
54cbd6b0 | 124 | struct ib_device *rg_device; |
99ef4db3 | 125 | enum dma_data_direction rg_direction; |
8cec3dba | 126 | void *rg_data; |
9128c3e7 CL |
127 | }; |
128 | ||
8cec3dba | 129 | static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb) |
9128c3e7 CL |
130 | { |
131 | return rb->rg_iov.addr; | |
132 | } | |
133 | ||
8cec3dba | 134 | static inline u32 rdmab_length(struct rpcrdma_regbuf *rb) |
9128c3e7 CL |
135 | { |
136 | return rb->rg_iov.length; | |
137 | } | |
138 | ||
8cec3dba | 139 | static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb) |
9128c3e7 CL |
140 | { |
141 | return rb->rg_iov.lkey; | |
142 | } | |
143 | ||
8cec3dba | 144 | static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb) |
91a10c52 CL |
145 | { |
146 | return rb->rg_device; | |
147 | } | |
148 | ||
8cec3dba CL |
149 | static inline void *rdmab_data(const struct rpcrdma_regbuf *rb) |
150 | { | |
151 | return rb->rg_data; | |
152 | } | |
153 | ||
#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 16. This prevents less-capable devices from
 * overrunning the Send buffer while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 16 read segments means the
 * chunk lists cannot consume more than
 *
 * ((16 + 2) * read segment size) + 1 XDR words,
 *
 * or about 400 bytes. The fixed part of the header is
 * another 24 bytes. Thus when the inline threshold is
 * 1024 bytes, at least 600 bytes are available for RPC
 * message bodies.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 16,
};
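
/* A rough sanity check of the estimate above, assuming each read
 * segment encodes as 5 XDR words (position, handle, length, and a
 * two-word offset, per RFC 8166):
 *
 *	((16 + 2) * 5 + 1) * 4 bytes/word = 364 bytes,
 *
 * which is consistent with the "about 400 bytes" figure.
 */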

/*
 * struct rpcrdma_rep -- this structure encapsulates state required
 * to receive and complete an RPC Reply, asynchronously. It needs
 * several pieces of state:
 *
 *   o receive buffer and ib_sge (donated to provider)
 *   o status of receive (success or not, length, inv rkey)
 *   o bookkeeping state to get run by reply handler (XDR stream)
 *
 * These structures are allocated during transport initialization.
 * N of these are associated with a transport instance, managed by
 * struct rpcrdma_buffer. N is the max number of outstanding RPCs.
 */

struct rpcrdma_rep {
	struct ib_cqe rr_cqe;
	__be32 rr_xid;
	__be32 rr_vers;
	__be32 rr_proc;
	int rr_wc_flags;
	u32 rr_inv_rkey;
	bool rr_temp;
	struct rpcrdma_regbuf *rr_rdmabuf;
	struct rpcrdma_xprt *rr_rxprt;
	struct rpc_rqst *rr_rqst;
	struct xdr_buf rr_hdrbuf;
	struct xdr_stream rr_stream;
	struct list_head rr_list;
	struct ib_recv_wr rr_recv_wr;
};

/* To reduce the rate at which a transport invokes ib_post_recv
 * (and thus the hardware doorbell rate), xprtrdma posts Receive
 * WRs in batches.
 *
 * Setting this to zero disables Receive post batching.
 */
enum {
	RPCRDMA_MAX_RECV_BATCH = 7,
};
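
/* Illustrative sketch of what batching buys (rep1, rep2, and ia stand
 * in for two rpcrdma_rep structures and the interface adapter): several
 * ib_recv_wr structures can be chained through wr.next and handed to
 * the provider with a single ib_post_recv() call, so only one doorbell
 * rings per batch:
 *
 *	struct ib_recv_wr *bad_wr;
 *	struct ib_recv_wr *first = &rep1->rr_recv_wr;
 *
 *	first->next = &rep2->rr_recv_wr;
 *	rep2->rr_recv_wr.next = NULL;
 *	rc = ib_post_recv(ia->ri_id->qp, first, &bad_wr);
 */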
216 | ||
ae72950a CL |
217 | /* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes |
218 | */ | |
01bb35c8 | 219 | struct rpcrdma_req; |
ae72950a CL |
220 | struct rpcrdma_xprt; |
221 | struct rpcrdma_sendctx { | |
222 | struct ib_send_wr sc_wr; | |
223 | struct ib_cqe sc_cqe; | |
dbcc53a5 | 224 | struct ib_device *sc_device; |
ae72950a | 225 | struct rpcrdma_xprt *sc_xprt; |
01bb35c8 | 226 | struct rpcrdma_req *sc_req; |
ae72950a CL |
227 | unsigned int sc_unmap_count; |
228 | struct ib_sge sc_sges[]; | |
229 | }; | |
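
/* sc_sges[] is a flexible array member, so each sendctx is allocated
 * with room for the device's actual SGE limit. A sketch of how such an
 * allocation might look (the size calculation is the point, not the
 * exact call site):
 *
 *	sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
 *		     GFP_KERNEL);
 */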
230 | ||
0dbb4108 | 231 | /* |
96ceddea | 232 | * struct rpcrdma_mr - external memory region metadata |
0dbb4108 CL |
233 | * |
234 | * An external memory region is any buffer or page that is registered | |
235 | * on the fly (ie, not pre-registered). | |
0dbb4108 | 236 | */ |
ce5b3717 | 237 | struct rpcrdma_frwr { |
0dbb4108 | 238 | struct ib_mr *fr_mr; |
2fa8f88d | 239 | struct ib_cqe fr_cqe; |
2fa8f88d | 240 | struct completion fr_linv_done; |
3cf4e169 CL |
241 | union { |
242 | struct ib_reg_wr fr_regwr; | |
243 | struct ib_send_wr fr_invwr; | |
244 | }; | |
0dbb4108 CL |
245 | }; |
246 | ||
6dc6ec9e | 247 | struct rpcrdma_req; |
96ceddea CL |
248 | struct rpcrdma_mr { |
249 | struct list_head mr_list; | |
6dc6ec9e | 250 | struct rpcrdma_req *mr_req; |
96ceddea CL |
251 | struct scatterlist *mr_sg; |
252 | int mr_nents; | |
253 | enum dma_data_direction mr_dir; | |
ba69cd12 | 254 | struct rpcrdma_frwr frwr; |
96ceddea CL |
255 | struct rpcrdma_xprt *mr_xprt; |
256 | u32 mr_handle; | |
257 | u32 mr_length; | |
258 | u64 mr_offset; | |
61da886b | 259 | struct work_struct mr_recycle; |
96ceddea | 260 | struct list_head mr_all; |
0dbb4108 CL |
261 | }; |
262 | ||
f58851e6 TT |
263 | /* |
264 | * struct rpcrdma_req -- structure central to the request/reply sequence. | |
265 | * | |
266 | * N of these are associated with a transport instance, and stored in | |
267 | * struct rpcrdma_buffer. N is the max number of outstanding requests. | |
268 | * | |
269 | * It includes pre-registered buffer memory for send AND recv. | |
270 | * The recv buffer, however, is not owned by this structure, and | |
271 | * is "donated" to the hardware when a recv is posted. When a | |
272 | * reply is handled, the recv buffer used is given back to the | |
273 | * struct rpcrdma_req associated with the request. | |
274 | * | |
275 | * In addition to the basic memory, this structure includes an array | |
276 | * of iovs for send operations. The reason is that the iovs passed to | |
277 | * ib_post_{send,recv} must not be modified until the work request | |
278 | * completes. | |
f58851e6 TT |
279 | */ |
280 | ||
5ab81428 CL |
281 | /* Maximum number of page-sized "segments" per chunk list to be |
282 | * registered or invalidated. Must handle a Reply chunk: | |
283 | */ | |
284 | enum { | |
285 | RPCRDMA_MAX_IOV_SEGS = 3, | |
286 | RPCRDMA_MAX_DATA_SEGS = ((1 * 1024 * 1024) / PAGE_SIZE) + 1, | |
287 | RPCRDMA_MAX_SEGS = RPCRDMA_MAX_DATA_SEGS + | |
288 | RPCRDMA_MAX_IOV_SEGS, | |
289 | }; | |
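
/* For example, with 4KB pages, RPCRDMA_MAX_DATA_SEGS is
 * (1048576 / 4096) + 1 = 257, so RPCRDMA_MAX_SEGS is 257 + 3 = 260
 * rl_segments[] entries per request.
 */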
290 | ||
f58851e6 | 291 | struct rpcrdma_mr_seg { /* chunk descriptors */ |
f58851e6 | 292 | u32 mr_len; /* length of chunk or segment */ |
f58851e6 TT |
293 | struct page *mr_page; /* owning page, if any */ |
294 | char *mr_offset; /* kva if no page, else offset */ | |
295 | }; | |
296 | ||
c6f5b47f CL |
297 | /* The Send SGE array is provisioned to send a maximum size |
298 | * inline request: | |
655fec69 CL |
299 | * - RPC-over-RDMA header |
300 | * - xdr_buf head iovec | |
c6f5b47f | 301 | * - RPCRDMA_MAX_INLINE bytes, in pages |
655fec69 | 302 | * - xdr_buf tail iovec |
c6f5b47f CL |
303 | * |
304 | * The actual number of array elements consumed by each RPC | |
305 | * depends on the device's max_sge limit. | |
655fec69 CL |
306 | */ |
307 | enum { | |
16f906d6 | 308 | RPCRDMA_MIN_SEND_SGES = 3, |
c6f5b47f | 309 | RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT, |
655fec69 CL |
310 | RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1, |
311 | }; | |
b3221d6a | 312 | |
5ab81428 | 313 | struct rpcrdma_buffer; |
f58851e6 | 314 | struct rpcrdma_req { |
a80d66c9 | 315 | struct list_head rl_list; |
edb41e61 | 316 | struct rpc_rqst rl_slot; |
90aab602 | 317 | struct rpcrdma_rep *rl_reply; |
7a80f3f0 CL |
318 | struct xdr_stream rl_stream; |
319 | struct xdr_buf rl_hdrbuf; | |
ae72950a | 320 | struct rpcrdma_sendctx *rl_sendctx; |
9c40c49f CL |
321 | struct rpcrdma_regbuf *rl_rdmabuf; /* xprt header */ |
322 | struct rpcrdma_regbuf *rl_sendbuf; /* rq_snd_buf */ | |
323 | struct rpcrdma_regbuf *rl_recvbuf; /* rq_rcv_buf */ | |
f531a5db CL |
324 | |
325 | struct list_head rl_all; | |
0ab11523 | 326 | struct kref rl_kref; |
5ab81428 | 327 | |
6dc6ec9e CL |
328 | struct list_head rl_free_mrs; |
329 | struct list_head rl_registered; | |
5ab81428 | 330 | struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS]; |
f58851e6 | 331 | }; |
0ca77dc3 CL |
332 | |
333 | static inline struct rpcrdma_req * | |
fc1eb807 | 334 | rpcr_to_rdmar(const struct rpc_rqst *rqst) |
0ca77dc3 | 335 | { |
edb41e61 | 336 | return container_of(rqst, struct rpcrdma_req, rl_slot); |
0ca77dc3 | 337 | } |
f58851e6 | 338 | |
9a5c63e9 | 339 | static inline void |
96ceddea | 340 | rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list) |
9a5c63e9 | 341 | { |
265a38d4 | 342 | list_add(&mr->mr_list, list); |
9a5c63e9 CL |
343 | } |
344 | ||
96ceddea CL |
345 | static inline struct rpcrdma_mr * |
346 | rpcrdma_mr_pop(struct list_head *list) | |
9a5c63e9 | 347 | { |
96ceddea | 348 | struct rpcrdma_mr *mr; |
9a5c63e9 | 349 | |
265a38d4 CL |
350 | mr = list_first_entry_or_null(list, struct rpcrdma_mr, mr_list); |
351 | if (mr) | |
352 | list_del_init(&mr->mr_list); | |
96ceddea | 353 | return mr; |
9a5c63e9 CL |
354 | } |
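
/* A sketch of the intended usage (the marshaling details are an
 * assumption, not spelled out in this header): MRs move between the
 * per-request lists with these helpers, for example
 *
 *	mr = rpcrdma_mr_pop(&req->rl_free_mrs);
 *	if (!mr)
 *		mr = rpcrdma_mr_get(r_xprt);
 *	...
 *	rpcrdma_mr_push(mr, &req->rl_registered);
 *
 * so that frwr_unmap_sync()/frwr_unmap_async() can later walk
 * rl_registered to invalidate everything registered for the request.
 */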
355 | ||
f58851e6 TT |
356 | /* |
357 | * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for | |
358 | * inline requests/replies, and client/server credits. | |
359 | * | |
360 | * One of these is associated with a transport instance | |
361 | */ | |
362 | struct rpcrdma_buffer { | |
4d6b8890 CL |
363 | spinlock_t rb_lock; |
364 | struct list_head rb_send_bufs; | |
365 | struct list_head rb_recv_bufs; | |
96ceddea | 366 | struct list_head rb_mrs; |
58d1dcf5 | 367 | |
ae72950a CL |
368 | unsigned long rb_sc_head; |
369 | unsigned long rb_sc_tail; | |
370 | unsigned long rb_sc_last; | |
371 | struct rpcrdma_sendctx **rb_sc_ctxs; | |
372 | ||
92f4433e | 373 | struct list_head rb_allreqs; |
4d6b8890 | 374 | struct list_head rb_all_mrs; |
92f4433e | 375 | |
58d1dcf5 | 376 | u32 rb_max_requests; |
be798f90 | 377 | u32 rb_credits; /* most recent credit grant */ |
f531a5db CL |
378 | |
379 | u32 rb_bc_srv_max_requests; | |
5d252f90 | 380 | u32 rb_bc_max_requests; |
505bbe64 | 381 | |
3b39f52a | 382 | struct work_struct rb_refresh_worker; |
f58851e6 | 383 | }; |
f58851e6 | 384 | |
f58851e6 TT |
385 | /* |
386 | * Statistics for RPCRDMA | |
387 | */ | |
388 | struct rpcrdma_stats { | |
67af6f65 | 389 | /* accessed when sending a call */ |
f58851e6 TT |
390 | unsigned long read_chunk_count; |
391 | unsigned long write_chunk_count; | |
392 | unsigned long reply_chunk_count; | |
f58851e6 | 393 | unsigned long long total_rdma_request; |
f58851e6 | 394 | |
67af6f65 | 395 | /* rarely accessed error counters */ |
f58851e6 | 396 | unsigned long long pullup_copy_count; |
f58851e6 TT |
397 | unsigned long hardway_register_count; |
398 | unsigned long failed_marshal_count; | |
399 | unsigned long bad_reply_count; | |
61da886b | 400 | unsigned long mrs_recycled; |
505bbe64 | 401 | unsigned long mrs_orphaned; |
e2ac236c | 402 | unsigned long mrs_allocated; |
ae72950a | 403 | unsigned long empty_sendctx_q; |
67af6f65 CL |
404 | |
405 | /* accessed when receiving a reply */ | |
406 | unsigned long long total_rdma_reply; | |
407 | unsigned long long fixup_copy_count; | |
01bb35c8 | 408 | unsigned long reply_waits_for_send; |
c8b920bb | 409 | unsigned long local_inv_needed; |
67af6f65 CL |
410 | unsigned long nomsg_call_count; |
411 | unsigned long bcall_count; | |
f58851e6 TT |
412 | }; |
413 | ||
414 | /* | |
415 | * RPCRDMA transport -- encapsulates the structures above for | |
416 | * integration with RPC. | |
417 | * | |
418 | * The contained structures are embedded, not pointers, | |
419 | * for convenience. This structure need not be visible externally. | |
420 | * | |
421 | * It is allocated and initialized during mount, and released | |
422 | * during unmount. | |
423 | */ | |
424 | struct rpcrdma_xprt { | |
5abefb86 | 425 | struct rpc_xprt rx_xprt; |
f58851e6 TT |
426 | struct rpcrdma_ia rx_ia; |
427 | struct rpcrdma_ep rx_ep; | |
428 | struct rpcrdma_buffer rx_buf; | |
5abefb86 | 429 | struct delayed_work rx_connect_worker; |
675dd90a | 430 | struct rpc_timeout rx_timeout; |
f58851e6 TT |
431 | struct rpcrdma_stats rx_stats; |
432 | }; | |
433 | ||
5abefb86 | 434 | #define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt) |
f58851e6 | 435 | |
d461f1f2 CL |
436 | static inline const char * |
437 | rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt) | |
438 | { | |
439 | return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]; | |
440 | } | |
441 | ||
442 | static inline const char * | |
443 | rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt) | |
444 | { | |
445 | return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT]; | |
446 | } | |
447 | ||
9191ca3b TT |
448 | /* Setting this to 0 ensures interoperability with early servers. |
449 | * Setting this to 1 enhances certain unaligned read/write performance. | |
450 | * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */ | |
451 | extern int xprt_rdma_pad_optimize; | |
452 | ||
fff09594 CL |
453 | /* This setting controls the hunt for a supported memory |
454 | * registration strategy. | |
455 | */ | |
456 | extern unsigned int xprt_rdma_memreg_strategy; | |
457 | ||
f58851e6 TT |
458 | /* |
459 | * Interface Adapter calls - xprtrdma/verbs.c | |
460 | */ | |
dd229cee | 461 | int rpcrdma_ia_open(struct rpcrdma_xprt *xprt); |
bebd0318 | 462 | void rpcrdma_ia_remove(struct rpcrdma_ia *ia); |
f58851e6 | 463 | void rpcrdma_ia_close(struct rpcrdma_ia *); |
d8f532d2 | 464 | |
f58851e6 TT |
465 | /* |
466 | * Endpoint calls - xprtrdma/verbs.c | |
467 | */ | |
86c4ccd9 CL |
468 | int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt); |
469 | void rpcrdma_ep_destroy(struct rpcrdma_xprt *r_xprt); | |
f58851e6 | 470 | int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *); |
282191cb | 471 | void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *); |
f58851e6 TT |
472 | |
473 | int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *, | |
474 | struct rpcrdma_req *); | |
f58851e6 TT |
475 | |
476 | /* | |
477 | * Buffer calls - xprtrdma/verbs.c | |
478 | */ | |
bb93a1ae | 479 | struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size, |
1769e6a8 | 480 | gfp_t flags); |
92f4433e | 481 | void rpcrdma_req_destroy(struct rpcrdma_req *req); |
ac920d04 | 482 | int rpcrdma_buffer_create(struct rpcrdma_xprt *); |
f58851e6 | 483 | void rpcrdma_buffer_destroy(struct rpcrdma_buffer *); |
dbcc53a5 | 484 | struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt); |
f58851e6 | 485 | |
96ceddea CL |
486 | struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt); |
487 | void rpcrdma_mr_put(struct rpcrdma_mr *mr); | |
61da886b CL |
488 | |
489 | static inline void | |
490 | rpcrdma_mr_recycle(struct rpcrdma_mr *mr) | |
491 | { | |
492 | schedule_work(&mr->mr_recycle); | |
493 | } | |
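
/* Recycling goes through a work item rather than happening inline,
 * presumably so that MR teardown and replacement (which can sleep) is
 * kept out of the contexts that notice a bad MR, such as completion
 * handlers.
 */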

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers,
			struct rpcrdma_req *req);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
			    gfp_t flags);
bool __rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
			      struct rpcrdma_regbuf *rb);

/**
 * rpcrdma_regbuf_is_mapped - check if buffer is DMA mapped
 *
 * Returns true if the buffer is now mapped to rb->rg_device.
 */
static inline bool rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

/**
 * rpcrdma_regbuf_dma_map - DMA-map a regbuf
 * @r_xprt: controlling transport instance
 * @rb: regbuf to be mapped
 *
 * Returns true if the buffer is currently DMA mapped.
 */
static inline bool rpcrdma_regbuf_dma_map(struct rpcrdma_xprt *r_xprt,
					  struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_regbuf_dma_map(r_xprt, rb);
}
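
/* The mapping is lazy: a regbuf is only DMA-mapped the first time it is
 * actually needed for an RDMA operation. A caller about to post a Send
 * or Receive would typically do something like (sketch, with a
 * hypothetical error label):
 *
 *	if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
 *		goto out_map_err;
 *	sge->addr = rdmab_addr(rb);
 */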
529 | ||
d654788e CL |
530 | /* |
531 | * Wrappers for chunk registration, shared by read/write chunk code. | |
532 | */ | |
533 | ||
d654788e CL |
534 | static inline enum dma_data_direction |
535 | rpcrdma_data_dir(bool writing) | |
536 | { | |
537 | return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | |
538 | } | |
539 | ||
5f62412b CL |
540 | /* Memory registration calls xprtrdma/frwr_ops.c |
541 | */ | |
f19bd0bb | 542 | bool frwr_is_supported(struct ib_device *device); |
40088f0e | 543 | void frwr_reset(struct rpcrdma_req *req); |
86c4ccd9 | 544 | int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep); |
5f62412b CL |
545 | int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr); |
546 | void frwr_release_mr(struct rpcrdma_mr *mr); | |
547 | size_t frwr_maxpages(struct rpcrdma_xprt *r_xprt); | |
548 | struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, | |
549 | struct rpcrdma_mr_seg *seg, | |
ec482cc1 | 550 | int nsegs, bool writing, __be32 xid, |
3b39f52a | 551 | struct rpcrdma_mr *mr); |
5f62412b CL |
552 | int frwr_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req); |
553 | void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs); | |
84756894 | 554 | void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); |
d8099fed | 555 | void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req); |
5f62412b | 556 | |
f58851e6 TT |
557 | /* |
558 | * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c | |
559 | */ | |
655fec69 CL |
560 | |
561 | enum rpcrdma_chunktype { | |
562 | rpcrdma_noch = 0, | |
563 | rpcrdma_readch, | |
564 | rpcrdma_areadch, | |
565 | rpcrdma_writech, | |
566 | rpcrdma_replych | |
567 | }; | |
568 | ||
857f9aca CL |
569 | int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt, |
570 | struct rpcrdma_req *req, u32 hdrlen, | |
571 | struct xdr_buf *xdr, | |
572 | enum rpcrdma_chunktype rtype); | |
dbcc53a5 | 573 | void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc); |
09e60641 | 574 | int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst); |
87cfb9a0 | 575 | void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *); |
e1352c96 | 576 | void rpcrdma_complete_rqst(struct rpcrdma_rep *rep); |
d8f532d2 | 577 | void rpcrdma_reply_handler(struct rpcrdma_rep *rep); |
f58851e6 | 578 | |
96f8778f CL |
579 | static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len) |
580 | { | |
581 | xdr->head[0].iov_len = len; | |
582 | xdr->len = len; | |
583 | } | |
584 | ||
ffe1f0df CL |
585 | /* RPC/RDMA module init - xprtrdma/transport.c |
586 | */ | |
86c4ccd9 | 587 | extern unsigned int xprt_rdma_slot_table_entries; |
5d252f90 | 588 | extern unsigned int xprt_rdma_max_inline_read; |
94087e97 | 589 | extern unsigned int xprt_rdma_max_inline_write; |
5d252f90 CL |
590 | void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap); |
591 | void xprt_rdma_free_addresses(struct rpc_xprt *xprt); | |
0c0829bc | 592 | void xprt_rdma_close(struct rpc_xprt *xprt); |
5d252f90 | 593 | void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq); |
ffe1f0df CL |
594 | int xprt_rdma_init(void); |
595 | void xprt_rdma_cleanup(void); | |
596 | ||
f531a5db CL |
597 | /* Backchannel calls - xprtrdma/backchannel.c |
598 | */ | |
599 | #if defined(CONFIG_SUNRPC_BACKCHANNEL) | |
600 | int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int); | |
6b26cc8c | 601 | size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *); |
7402a4fe | 602 | unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *); |
f531a5db | 603 | int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int); |
63cae470 | 604 | void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *); |
cf73daf5 | 605 | int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst); |
f531a5db CL |
606 | void xprt_rdma_bc_free_rqst(struct rpc_rqst *); |
607 | void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int); | |
608 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ | |
609 | ||
5d252f90 | 610 | extern struct xprt_class xprt_rdma_bc; |
cec56c8f | 611 | |
f58851e6 | 612 | #endif /* _LINUX_SUNRPC_XPRT_RDMA_H */ |