SUNRPC: Fix leak of krb5p encode pages
net/sunrpc/xprt.c
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
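
/*
 * In outline, the sequence above maps onto the entry points in this
 * file roughly as follows (a simplified sketch of the client call
 * path, not literal calling code):
 *
 *	xprt_reserve(task);		  - allocate a request slot
 *	xprt_prepare_transmit(task);	  - take the transport write lock
 *	xprt_transmit(task);		  - drain the transmit queue
 *	xprt_request_wait_receive(task);  - sleep until reply or timeout
 *	xprt_release(task);		  - return the slot
 */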

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_connect_status(struct rpc_task *task);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
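
/*
 * Illustrative sketch (not part of this file): a transport module fills
 * in a struct xprt_class and registers it at module load time.  The
 * names example_xprt_class/example_setup and the ident value are
 * hypothetical.
 */
#if 0
static struct xprt_class example_xprt_class = {
	.list	= LIST_HEAD_INIT(example_xprt_class.list),
	.name	= "example",
	.owner	= THIS_MODULE,
	.ident	= 99,			/* must be unique among transports */
	.setup	= example_setup,	/* creates the rpc_xprt */
};

static int __init example_module_init(void)
{
	return xprt_register_transport(&example_xprt_class);
}

static void __exit example_module_exit(void)
{
	xprt_unregister_transport(&example_xprt_class);
}
#endif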

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
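
/*
 * Sketch: a transport that does not need congestion control typically
 * wires this helper straight into its ops table (hypothetical example
 * mirroring how the stream transports use it):
 */
#if 0
static const struct rpc_xprt_ops example_ops = {
	.reserve_xprt	= xprt_reserve_xprt,
	.release_xprt	= xprt_release_xprt,
	/* ... remaining callbacks ... */
};
#endif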

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	task->tk_status = -EAGAIN;
	rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock_bh(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);
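
/*
 * Sketch of how a congestion-controlled transport's ->send_request
 * might take a credit before putting data on the wire (modelled on the
 * UDP socket transport; the function name and error convention here
 * are illustrative assumptions):
 */
#if 0
static int example_send_request(struct rpc_rqst *req)
{
	/* Defer transmission until the congestion window opens */
	if (!xprt_request_get_cong(req->rq_xprt, req))
		return -EBADSLT;
	/* ... transmit req->rq_snd_buf ... */
	return 0;
}
#endif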

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock_bh(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock_bh(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
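
/*
 * Worked example of the arithmetic above (assuming the usual
 * RPC_CWNDSCALE of 256, i.e. one request "slot" per 256 units): with
 * cwnd = 1024 (four requests in flight), a successful reply adds
 * (256 * 256 + 512) / 1024 = 64, growing the window by a quarter of a
 * slot, while an -ETIMEDOUT halves it to 512.  The window never drops
 * below one slot (RPC_CWNDSCALE) nor rises above RPC_MAXCWND(xprt).
 */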

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock_bh(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
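
/*
 * Worked example of the backoff above (hypothetical linear parameters:
 * to_initval = 10s, to_increment = 10s, to_retries = 3): rq_majortimeo
 * is set 10 + 10 * 3 = 40s ahead, and successive minor timeouts grow
 * rq_timeout 10s -> 20s -> 30s.  Once jiffies passes rq_majortimeo,
 * the counters reset, the RTT estimator restarts ("slow start"), and
 * -ETIMEDOUT is reported to the caller.  With to_exponential set,
 * rq_timeout doubles on each minor timeout instead, and the major
 * timeout is to_initval << to_retries.
 */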

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
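
/*
 * Usage note: callers pass the cookie that was sampled into
 * req->rq_connect_cookie when the request was transmitted, so only the
 * first request of a retransmit-bound batch actually schedules the
 * disconnect; the rest see a stale cookie and return early.
 */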

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	switch (task->tk_status) {
	case 0:
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		break;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				task->tk_rqstp->rq_xprt->servername);
		task->tk_status = -EIO;
	}
}

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}
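
/*
 * Design note: pending requests are kept in an rb-tree keyed on the
 * (bitwise-compared) XID, so matching an incoming reply to its request
 * is O(log n) in the number of outstanding requests rather than a
 * linear list walk.
 */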

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt receive lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding the xprt receive lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}
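
/*
 * Sketch of the intended lookup/pin/unpin pattern in a transport's
 * receive path (modelled on the socket transports; variable names and
 * the copy helper are illustrative): the request is pinned so the data
 * copy can run without holding xprt->queue_lock.
 */
#if 0
spin_lock(&xprt->queue_lock);
req = xprt_lookup_rqst(xprt, xid);
if (req) {
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);
	copied = example_copy_reply_data(req);	/* hypothetical helper */
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(req->rq_task, copied);
	xprt_unpin_rqst(req);
}
spin_unlock(&xprt->queue_lock);
#endif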

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	xprt_reset_majortimeo(req);
	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->set_retrans_timeout(task);
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		req->rq_ntrans--;
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
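
/*
 * Sketch (hypothetical transport setup): request slots are preallocated
 * at transport creation time and grown on demand up to max_alloc.  The
 * struct name and slot counts below are illustrative only.
 */
#if 0
xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
		  16 /* num_prealloc */, 128 /* max_alloc */);
if (xprt == NULL)
	return ERR_PTR(-ENOMEM);
#endif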
1577
e204e621
PE
1578void xprt_free(struct rpc_xprt *xprt)
1579{
37aa2133 1580 put_net(xprt->xprt_net);
21de0a95 1581 xprt_free_all_slots(xprt);
fda1bfef 1582 kfree_rcu(xprt, rcu);
e204e621
PE
1583}
1584EXPORT_SYMBOL_GPL(xprt_free);
1585
902c5887
TM
1586static void
1587xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1588{
1589 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1590}
1591
9dc6edcf
TM
1592static __be32
1593xprt_alloc_xid(struct rpc_xprt *xprt)
1594{
1595 __be32 xid;
1596
1597 spin_lock(&xprt->reserve_lock);
1598 xid = (__force __be32)xprt->xid++;
1599 spin_unlock(&xprt->reserve_lock);
1600 return xid;
1601}
1602
1603static void
1604xprt_init_xid(struct rpc_xprt *xprt)
1605{
1606 xprt->xid = prandom_u32();
1607}
1608
1609static void
1610xprt_request_init(struct rpc_task *task)
1611{
1612 struct rpc_xprt *xprt = task->tk_xprt;
1613 struct rpc_rqst *req = task->tk_rqstp;
1614
9dc6edcf
TM
1615 req->rq_timeout = task->tk_client->cl_timeout->to_initval;
1616 req->rq_task = task;
1617 req->rq_xprt = xprt;
1618 req->rq_buffer = NULL;
1619 req->rq_xid = xprt_alloc_xid(xprt);
902c5887 1620 xprt_init_connect_cookie(req, xprt);
9dc6edcf
TM
1621 req->rq_bytes_sent = 0;
1622 req->rq_snd_buf.len = 0;
1623 req->rq_snd_buf.buflen = 0;
1624 req->rq_rcv_buf.len = 0;
1625 req->rq_rcv_buf.buflen = 0;
1626 req->rq_release_snd_buf = NULL;
1627 xprt_reset_majortimeo(req);
1628 dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
1629 req, ntohl(req->rq_xid));
1630}
1631
1632static void
1633xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1634{
1635 xprt->ops->alloc_slot(xprt, task);
1636 if (task->tk_rqstp != NULL)
1637 xprt_request_init(task);
1638}
1639
9903cd1c
CL
1640/**
1641 * xprt_reserve - allocate an RPC request slot
1642 * @task: RPC task requesting a slot allocation
1643 *
ba60eb25
TM
1644 * If the transport is marked as being congested, or if no more
1645 * slots are available, place the task on the transport's
9903cd1c
CL
1646 * backlog queue.
1647 */
1648void xprt_reserve(struct rpc_task *task)
1da177e4 1649{
fb43d172 1650 struct rpc_xprt *xprt = task->tk_xprt;
1da177e4 1651
43cedbf0
TM
1652 task->tk_status = 0;
1653 if (task->tk_rqstp != NULL)
1654 return;
1655
43cedbf0
TM
1656 task->tk_timeout = 0;
1657 task->tk_status = -EAGAIN;
ba60eb25 1658 if (!xprt_throttle_congested(xprt, task))
9dc6edcf 1659 xprt_do_reserve(xprt, task);
ba60eb25
TM
1660}
1661
1662/**
1663 * xprt_retry_reserve - allocate an RPC request slot
1664 * @task: RPC task requesting a slot allocation
1665 *
1666 * If no more slots are available, place the task on the transport's
1667 * backlog queue.
1668 * Note that the only difference with xprt_reserve is that we now
1669 * ignore the value of the XPRT_CONGESTED flag.
1670 */
1671void xprt_retry_reserve(struct rpc_task *task)
1672{
fb43d172 1673 struct rpc_xprt *xprt = task->tk_xprt;
ba60eb25
TM
1674
1675 task->tk_status = 0;
1676 if (task->tk_rqstp != NULL)
1677 return;
1678
1679 task->tk_timeout = 0;
1680 task->tk_status = -EAGAIN;
9dc6edcf 1681 xprt_do_reserve(xprt, task);
1da177e4
LT
1682}
1683
edc81dcd
TM
1684static void
1685xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
1686{
1687 struct rpc_xprt *xprt = req->rq_xprt;
1688
944b0429
TM
1689 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1690 test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
edc81dcd
TM
1691 xprt_is_pinned_rqst(req)) {
1692 spin_lock(&xprt->queue_lock);
944b0429 1693 xprt_request_dequeue_transmit_locked(task);
edc81dcd
TM
1694 xprt_request_dequeue_receive_locked(task);
1695 while (xprt_is_pinned_rqst(req)) {
1696 set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1697 spin_unlock(&xprt->queue_lock);
1698 xprt_wait_on_pinned_rqst(req);
1699 spin_lock(&xprt->queue_lock);
1700 clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1701 }
1702 spin_unlock(&xprt->queue_lock);
1703 }
1704}
1705
9903cd1c
CL
1706/**
1707 * xprt_release - release an RPC request slot
1708 * @task: task which is finished with the slot
1709 *
1da177e4 1710 */
9903cd1c 1711void xprt_release(struct rpc_task *task)
1da177e4 1712{
55ae1aab 1713 struct rpc_xprt *xprt;
87ed5003 1714 struct rpc_rqst *req = task->tk_rqstp;
1da177e4 1715
87ed5003
TM
1716 if (req == NULL) {
1717 if (task->tk_client) {
fb43d172 1718 xprt = task->tk_xprt;
bd79bc57 1719 xprt_release_write(xprt, task);
87ed5003 1720 }
1da177e4 1721 return;
87ed5003 1722 }
55ae1aab 1723
55ae1aab 1724 xprt = req->rq_xprt;
0a702195
WAA
1725 if (task->tk_ops->rpc_count_stats != NULL)
1726 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
1727 else if (task->tk_client)
1728 rpc_count_iostats(task, task->tk_client->cl_metrics);
edc81dcd 1729 xprt_request_dequeue_all(task, req);
4a0f8c04 1730 spin_lock_bh(&xprt->transport_lock);
49e9a890 1731 xprt->ops->release_xprt(xprt, task);
a58dd398
CL
1732 if (xprt->ops->release_request)
1733 xprt->ops->release_request(task);
1da177e4 1734 xprt->last_used = jiffies;
ad3331ac 1735 xprt_schedule_autodisconnect(xprt);
4a0f8c04 1736 spin_unlock_bh(&xprt->transport_lock);
ee5ebe85 1737 if (req->rq_buffer)
3435c74a 1738 xprt->ops->buf_free(task);
4a068258 1739 xprt_inject_disconnect(xprt);
9d96acbc 1740 xdr_free_bvec(&req->rq_rcv_buf);
a17c2153
TM
1741 if (req->rq_cred != NULL)
1742 put_rpccred(req->rq_cred);
1da177e4 1743 task->tk_rqstp = NULL;
ead5e1c2
BF
1744 if (req->rq_release_snd_buf)
1745 req->rq_release_snd_buf(req);
55ae1aab 1746
46121cf7 1747 dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
ee5ebe85 1748 if (likely(!bc_prealloc(req)))
a9cde23a 1749 xprt->ops->free_slot(xprt, req);
ee5ebe85 1750 else
c9acb42e 1751 xprt_free_bc_request(req);
1da177e4
LT
1752}
1753
902c5887
TM
1754#ifdef CONFIG_SUNRPC_BACKCHANNEL
1755void
1756xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1757{
1758 struct xdr_buf *xbufp = &req->rq_snd_buf;
1759
1760 task->tk_rqstp = req;
1761 req->rq_task = task;
1762 xprt_init_connect_cookie(req, req->rq_xprt);
1763 /*
1764 * Set up the xdr_buf length.
1765 * This also indicates that the buffer is XDR encoded already.
1766 */
1767 xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1768 xbufp->tail[0].iov_len;
1769 req->rq_bytes_sent = 0;
1770}
1771#endif
1772
21de0a95 1773static void xprt_init(struct rpc_xprt *xprt, struct net *net)
c2866763 1774{
30c5116b 1775 kref_init(&xprt->kref);
c2866763
CL
1776
1777 spin_lock_init(&xprt->transport_lock);
1778 spin_lock_init(&xprt->reserve_lock);
75c84151 1779 spin_lock_init(&xprt->queue_lock);
c2866763
CL
1780
1781 INIT_LIST_HEAD(&xprt->free);
95f7691d 1782 xprt->recv_queue = RB_ROOT;
944b0429 1783 INIT_LIST_HEAD(&xprt->xmit_queue);
9e00abc3 1784#if defined(CONFIG_SUNRPC_BACKCHANNEL)
f9acac1a
RL
1785 spin_lock_init(&xprt->bc_pa_lock);
1786 INIT_LIST_HEAD(&xprt->bc_pa_list);
9e00abc3 1787#endif /* CONFIG_SUNRPC_BACKCHANNEL */
80b14d5e 1788 INIT_LIST_HEAD(&xprt->xprt_switch);
f9acac1a 1789
c2866763
CL
1790 xprt->last_used = jiffies;
1791 xprt->cwnd = RPC_INITCWND;
a509050b 1792 xprt->bind_index = 0;
c2866763
CL
1793
1794 rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1795 rpc_init_wait_queue(&xprt->pending, "xprt_pending");
79c99152 1796 rpc_init_wait_queue(&xprt->sending, "xprt_sending");
c2866763
CL
1797 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1798
c2866763
CL
1799 xprt_init_xid(xprt);
1800
21de0a95 1801 xprt->xprt_net = get_net(net);
8d9266ff
TM
1802}
1803
1804/**
1805 * xprt_create_transport - create an RPC transport
1806 * @args: rpc transport creation arguments
1807 *
1808 */
1809struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1810{
1811 struct rpc_xprt *xprt;
1812 struct xprt_class *t;
1813
1814 spin_lock(&xprt_list_lock);
1815 list_for_each_entry(t, &xprt_list, list) {
1816 if (t->ident == args->ident) {
1817 spin_unlock(&xprt_list_lock);
1818 goto found;
1819 }
1820 }
1821 spin_unlock(&xprt_list_lock);
3c45ddf8 1822 dprintk("RPC: transport (%d) not supported\n", args->ident);
8d9266ff
TM
1823 return ERR_PTR(-EIO);
1824
1825found:
1826 xprt = t->setup(args);
1827 if (IS_ERR(xprt)) {
1828 dprintk("RPC: xprt_create_transport: failed, %ld\n",
1829 -PTR_ERR(xprt));
21de0a95 1830 goto out;
8d9266ff 1831 }
33d90ac0
BF
1832 if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1833 xprt->idle_timeout = 0;
21de0a95
TM
1834 INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1835 if (xprt_has_timer(xprt))
ff861c4d 1836 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
21de0a95 1837 else
ff861c4d 1838 timer_setup(&xprt->timer, NULL, 0);
4e0038b6
TM
1839
1840 if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1841 xprt_destroy(xprt);
1842 return ERR_PTR(-EINVAL);
1843 }
1844 xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1845 if (xprt->servername == NULL) {
1846 xprt_destroy(xprt);
1847 return ERR_PTR(-ENOMEM);
1848 }
1849
3f940098 1850 rpc_xprt_debugfs_register(xprt);
388f0c77 1851
46121cf7 1852 dprintk("RPC: created transport %p with %u slots\n", xprt,
c2866763 1853 xprt->max_reqs);
21de0a95 1854out:
c2866763
CL
1855 return xprt;
1856}
1857
528fd354
TM
1858static void xprt_destroy_cb(struct work_struct *work)
1859{
1860 struct rpc_xprt *xprt =
1861 container_of(work, struct rpc_xprt, task_cleanup);
1862
1863 rpc_xprt_debugfs_unregister(xprt);
1864 rpc_destroy_wait_queue(&xprt->binding);
1865 rpc_destroy_wait_queue(&xprt->pending);
1866 rpc_destroy_wait_queue(&xprt->sending);
1867 rpc_destroy_wait_queue(&xprt->backlog);
1868 kfree(xprt->servername);
1869 /*
1870 * Tear down transport state and free the rpc_xprt
1871 */
1872 xprt->ops->destroy(xprt);
1873}
1874
9903cd1c
CL
1875/**
1876 * xprt_destroy - destroy an RPC transport, killing off all requests.
a8de240a 1877 * @xprt: transport to destroy
9903cd1c 1878 *
1da177e4 1879 */
a8de240a 1880static void xprt_destroy(struct rpc_xprt *xprt)
1da177e4 1881{
46121cf7 1882 dprintk("RPC: destroying transport %p\n", xprt);
79234c3d 1883
528fd354
TM
1884 /*
1885 * Exclude transport connect/disconnect handlers and autoclose
1886 */
79234c3d
TM
1887 wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1888
0065db32 1889 del_timer_sync(&xprt->timer);
c8541ecd
CL
1890
1891 /*
528fd354
TM
1892 * Destroy sockets etc from the system workqueue so they can
1893 * safely flush receive work running on rpciod.
c8541ecd 1894 */
528fd354
TM
1895 INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
1896 schedule_work(&xprt->task_cleanup);
6b6ca86b 1897}
1da177e4 1898
30c5116b
TM
1899static void xprt_destroy_kref(struct kref *kref)
1900{
1901 xprt_destroy(container_of(kref, struct rpc_xprt, kref));
1902}
1903
1904/**
1905 * xprt_get - return a reference to an RPC transport.
1906 * @xprt: pointer to the transport
1907 *
1908 */
1909struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
1910{
1911 if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
1912 return xprt;
1913 return NULL;
1914}
1915EXPORT_SYMBOL_GPL(xprt_get);
1916
6b6ca86b
TM
1917/**
1918 * xprt_put - release a reference to an RPC transport.
1919 * @xprt: pointer to the transport
1920 *
1921 */
1922void xprt_put(struct rpc_xprt *xprt)
1923{
30c5116b
TM
1924 if (xprt != NULL)
1925 kref_put(&xprt->kref, xprt_destroy_kref);
6b6ca86b 1926}
5d252f90 1927EXPORT_SYMBOL_GPL(xprt_put);