/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	__xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
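
/*
 * For illustration only (a sketch; the class below is hypothetical and
 * not part of this file): a transport module typically registers its
 * xprt_class from its module init routine and unregisters it on exit.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,  // hypothetical ident
 *		.setup	= xs_setup_example,        // hypothetical setup fn
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit cleanup_example_xprt(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */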

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
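
/*
 * The request_module("xprt%s", ...) call above means that a transport
 * that is not yet registered is looked up by module alias: loading
 * transport_name "foo" requests a module that advertises itself as
 * "xprtfoo". (Naming-scheme illustration only; no claim is made here
 * about any particular module's aliases.)
 */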

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
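
/*
 * The sleep priority chosen in xprt_reserve_xprt() orders the waiters:
 * retransmissions (rq_ntrans != 0) are woken first, requests that have
 * never been transmitted come next, and tasks that hold no request
 * slot at all go last.
 */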

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
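
/*
 * A worked example of the additive increase above (assuming
 * RPC_CWNDSCALE is 256, its traditional value): with cwnd at four
 * "slots" (4 * 256 = 1024), one successful reply adds
 * (256 * 256 + 512) / 1024 = 64, a quarter of a slot, so a full
 * window of replies grows the window by about one request. That is
 * the classic 1/cwnd increase. A timeout instead halves the window
 * at once, with RPC_CWNDSCALE and RPC_MAXCWND() bounding it below
 * and above.
 */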

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
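
/*
 * Descriptive note: the base value from rpc_calc_rto() is derived from
 * the smoothed round-trip estimate kept per procedure timer class, and
 * every backoff already taken (rpc_ntimeo() plus rq_retries) doubles
 * it via the shift above, with the result clamped to the client's
 * to_maxval.
 */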

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
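
/*
 * Example of the backoff arithmetic (illustrative numbers only): with
 * to_initval = 3 * HZ, to_retries = 2 and to_exponential set, the
 * minor timeouts run 3s, 6s, 12s, and the major timeout computed by
 * xprt_reset_majortimeo() is to_initval << to_retries = 12s past the
 * transmission. Once that passes, the else-branch above resets
 * rq_timeout to to_initval, reinitializes the RTT estimator, and
 * returns -ETIMEDOUT so the caller can take major-timeout action.
 */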

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}
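
/*
 * Note the double test of XPRT_CONGESTED above: the first, lockless
 * check is a cheap fast path; the second is made under reserve_lock so
 * that xprt_wake_up_backlog(), whose caller holds the same lock when
 * clearing the bit, cannot race the task onto the backlog queue after
 * the last waiter has already been woken.
 */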

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
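
/*
 * Slot allocation thus tries three sources in order: the preallocated
 * free list, a dynamic kzalloc() bounded by max_reqs (GFP_NOWAIT, since
 * it must not sleep under reserve_lock), and finally the backlog queue,
 * where the task waits for another request to return its slot.
 */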

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
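
/*
 * For illustration (a sketch, not code from this file): a transport's
 * setup routine usually embeds struct rpc_xprt at the start of its own
 * private structure and lets xprt_alloc() size and prime it:
 *
 *	struct example_xprt *new;	// hypothetical private type
 *	struct rpc_xprt *xprt;
 *
 *	xprt = xprt_alloc(args->net, sizeof(*new),
 *			  slot_table_size, max_slot_table_size);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 *
 * with transport-specific initialization after that, and a matching
 * xprt_free() on any failure path.
 */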

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}