/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

3968cb58 CL |
22 | static int |
23 | physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, | |
24 | struct rpcrdma_create_data_internal *cdata) | |
25 | { | |
d1ed857e CL |
26 | struct ib_mr *mr; |
27 | ||
28 | /* Obtain an rkey to use for RPC data payloads. | |
29 | */ | |
30 | mr = ib_get_dma_mr(ia->ri_pd, | |
31 | IB_ACCESS_LOCAL_WRITE | | |
32 | IB_ACCESS_REMOTE_WRITE | | |
33 | IB_ACCESS_REMOTE_READ); | |
34 | if (IS_ERR(mr)) { | |
35 | pr_err("%s: ib_get_dma_mr for failed with %lX\n", | |
36 | __func__, PTR_ERR(mr)); | |
37 | return -ENOMEM; | |
38 | } | |
d1ed857e | 39 | |
bb6c96d7 | 40 | ia->ri_dma_mr = mr; |
3968cb58 CL |
41 | return 0; |
42 | } | |
43 | ||
1c9351ee CL |
44 | /* PHYSICAL memory registration conveys one page per chunk segment. |
45 | */ | |
46 | static size_t | |
47 | physical_op_maxpages(struct rpcrdma_xprt *r_xprt) | |
48 | { | |
49 | return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, | |
50 | rpcrdma_max_segments(r_xprt)); | |
51 | } | |
52 | ||
/* PHYSICAL mode keeps no per-transport registration state, so
 * there is nothing to set up here. Always succeeds.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}

9c1b4d77 CL |
59 | /* The client's physical memory is already exposed for |
60 | * remote access via RDMA READ or RDMA WRITE. | |
61 | */ | |
62 | static int | |
63 | physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, | |
64 | int nsegs, bool writing) | |
65 | { | |
66 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; | |
67 | ||
89e0d112 | 68 | rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing)); |
d1ed857e | 69 | seg->mr_rkey = ia->ri_dma_mr->rkey; |
9c1b4d77 CL |
70 | seg->mr_base = seg->mr_dma; |
71 | seg->mr_nsegs = 1; | |
72 | return 1; | |
73 | } | |
74 | ||
6814baea CL |
75 | /* Unmap a memory region, but leave it registered. |
76 | */ | |
77 | static int | |
78 | physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) | |
79 | { | |
d654788e CL |
80 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
81 | ||
89e0d112 | 82 | rpcrdma_unmap_one(ia->ri_device, seg); |
6814baea CL |
83 | return 1; |
84 | } | |
85 | ||
73eee9b2 CL |
86 | /* DMA unmap all memory regions that were mapped for "req". |
87 | */ | |
88 | static void | |
89 | physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) | |
90 | { | |
91 | struct ib_device *device = r_xprt->rx_ia.ri_device; | |
92 | unsigned int i; | |
93 | ||
94 | for (i = 0; req->rl_nchunks; --req->rl_nchunks) | |
95 | rpcrdma_unmap_one(device, &req->rl_segments[i++]); | |
96 | } | |
97 | ||
/* PHYSICAL mode allocates no per-buffer registration resources,
 * so there is nothing to tear down here.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

a0ce85f5 | 103 | const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { |
9c1b4d77 | 104 | .ro_map = physical_op_map, |
73eee9b2 | 105 | .ro_unmap_sync = physical_op_unmap_sync, |
6814baea | 106 | .ro_unmap = physical_op_unmap, |
3968cb58 | 107 | .ro_open = physical_op_open, |
1c9351ee | 108 | .ro_maxpages = physical_op_maxpages, |
91e70e70 | 109 | .ro_init = physical_op_init, |
4561f347 | 110 | .ro_destroy = physical_op_destroy, |
a0ce85f5 CL |
111 | .ro_displayname = "physical", |
112 | }; |