/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	return xprt->bc_alloc_count -= n;
}
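
/*
 * Locking note: in this file both writers, xprt_inc_alloc_count() and
 * xprt_dec_alloc_count(), are called with xprt->bc_pa_lock held, while
 * xprt_need_to_requeue() reads bc_alloc_count without the lock from
 * xprt_free_bc_request().
 */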

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}
static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;

	/* Allocate one page to back this xdr_buf (used for both the
	 * send and receive buffers) */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	buf->head[0].iov_base = page_address(page);
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
	return 0;
}

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}
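
/*
 * Cost sketch (worked arithmetic, not in the original source): each
 * preallocated request consumes one rpc_rqst plus two pages, one for
 * the receive xdr_buf and one for the send xdr_buf.  Preallocating
 * min_reqs slots therefore costs roughly
 *
 *	min_reqs * (sizeof(struct rpc_rqst) + 2 * PAGE_SIZE)
 *
 * bytes, i.e. a bit over 8 KB per slot with 4 KB pages.
 */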

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceid is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceids.  The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers.  Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct.  This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct.  It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				       struct rpc_rqst,
				       rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);

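/*
 * Usage sketch (illustrative, not taken from this file): an NFSv4.1
 * client reserves backchannel resources while establishing a session,
 * roughly along these lines (the constant and field names follow fs/nfs
 * usage but are an assumption here):
 *
 *	rc = xprt_setup_backchannel(clp->cl_rpcclient->cl_xprt,
 *				    NFS41_BC_MIN_CALLBACKS);
 *	if (rc < 0)
 *		goto out_error;
 */
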
/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

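/*
 * Teardown sketch (illustrative, names as in the setup sketch above):
 * a caller releases the same number of slots it reserved when the
 * session goes away:
 *
 *	xprt_destroy_backchannel(clp->cl_rpcclient->cl_xprt,
 *				 NFS41_BC_MIN_CALLBACKS);
 *
 * Entries currently marked RPC_BC_PA_IN_USE are not on bc_pa_list;
 * they are freed later by xprt_free_bc_request() once bc_alloc_count
 * has dropped to zero.
 */
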
static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (list_empty(&xprt->bc_pa_list))
		goto not_found;

	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
			       rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
	       sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	if (!xprt_need_to_requeue(xprt)) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use.  Free the entry and don't attempt
		 * to add it back to the list because there is no need
		 * for any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_lookup_bc_request() to obtain
 * one of these preallocated requests, and xprt_free_bc_request() to
 * return it.
 *
 * We know that we're called in soft interrupt context, so take the plain
 * spin_lock rather than the bottom-half spin_lock_bh variant.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}
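
/*
 * Call-site sketch (an assumption, not from this file): a stream
 * transport's receive path matches an incoming backchannel call to a
 * slot by XID before copying in the reply data:
 *
 *	req = xprt_lookup_bc_request(xprt, xid);
 *	if (req == NULL)
 *		return -ESHUTDOWN;
 *
 * where a NULL return means no preallocated slot was available and the
 * caller should drop the connection.
 */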

/*
 * Add callback request to callback list.  The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests.  Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
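
/*
 * Consumer sketch (illustrative; based on how an NFSv4 callback service
 * thread would drain the queue, details are an assumption):
 *
 *	for (;;) {
 *		wait_event_interruptible(serv->sv_cb_waitq,
 *					 !list_empty(&serv->sv_cb_list));
 *		spin_lock_bh(&serv->sv_cb_lock);
 *		req = list_first_entry(&serv->sv_cb_list,
 *				       struct rpc_rqst, rq_bc_list);
 *		list_del(&req->rq_bc_list);
 *		spin_unlock_bh(&serv->sv_cb_lock);
 *		... process the callback, then xprt_free_bc_request(req) ...
 *	}
 */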