// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -   When a process places a call, it allocates a request slot if
 *      one is available. Otherwise, it sleeps on the backlog queue
 *      (xprt_reserve).
 *  -   Next, the caller puts together the RPC message, stuffs it into
 *      the request struct, and calls xprt_transmit().
 *  -   xprt_transmit sends the message and installs the caller on the
 *      transport's wait list. At the same time, if a reply is expected,
 *      it installs a timer that is run after the packet's timeout has
 *      expired.
 *  -   When a packet arrives, the data_ready handler walks the list of
 *      pending requests for that transport. If a matching XID is found, the
 *      caller is woken up, and the timer removed.
 *  -   When no reply arrives within the timeout interval, the timer is
 *      fired by the kernel and runs xprt_timer(). It either adjusts the
 *      timeout values (minor timeout) or wakes up the caller with a status
 *      of -ETIMEDOUT.
 *  -   When the caller receives a notification from RPC that a reply arrived,
 *      it should release the RPC slot, and process the reply.
 *      If the call timed out, it may choose to retry the operation by
 *      adjusting the initial timeout value, and simply calling rpc_call
 *      again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
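
/*
 * In terms of the code below: xprt_reserve_xprt() (or its congestion-
 * controlled variant) serializes write access to the transport,
 * xprt_request_enqueue_transmit() and xprt_transmit() send the request,
 * and the task then waits in xprt_request_wait_receive() until either
 * xprt_complete_rqst() hands it the reply or xprt_timer() fires and
 * xprt_adjust_timeout() decides between a minor timeout (retransmit)
 * and -ETIMEDOUT.
 */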

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void      xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32   xprt_alloc_xid(struct rpc_xprt *xprt);
static void      xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
        unsigned long timeout = jiffies + req->rq_timeout;

        if (time_before(timeout, req->rq_majortimeo))
                return timeout;
        return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:           transport successfully registered
 * -EEXIST:     transport already registered
 * -EINVAL:     transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = -EEXIST;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                /* don't register the same transport class twice */
                if (t->ident == transport->ident)
                        goto out;
        }

        list_add_tail(&transport->list, &xprt_list);
        printk(KERN_INFO "RPC: Registered %s transport module.\n",
               transport->name);
        result = 0;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:           transport successfully unregistered
 * -ENOENT:     transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t == transport) {
                        printk(KERN_INFO
                                "RPC: Unregistered %s transport module.\n",
                                transport->name);
                        list_del_init(&transport->list);
                        goto out;
                }
        }
        result = -ENOENT;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
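
/*
 * Registration usage sketch (illustrative only; the "example" name,
 * the ident value and the setup callback are hypothetical): a transport
 * module typically registers its xprt_class from its module init hook
 * and unregisters it on exit:
 *
 *      static struct xprt_class example_transport = {
 *              .list   = LIST_HEAD_INIT(example_transport.list),
 *              .name   = "example",
 *              .owner  = THIS_MODULE,
 *              .ident  = 1042,
 *              .setup  = example_setup,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return xprt_register_transport(&example_transport);
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              xprt_unregister_transport(&example_transport);
 *      }
 */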

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:           transport successfully loaded
 * -ENOENT:     transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (strcmp(t->name, transport_name) == 0) {
                        spin_unlock(&xprt_list_lock);
                        goto out;
                }
        }
        spin_unlock(&xprt_list_lock);
        result = request_module("xprt%s", transport_name);
out:
        return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
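
/*
 * Note that the module name is derived by prefixing "xprt": for
 * example, xprt_load_transport("rdma") requests a module named
 * "xprtrdma".
 */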

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
        xprt->snd_task = NULL;
        if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
                smp_mb__before_atomic();
                clear_bit(XPRT_LOCKED, &xprt->state);
                smp_mb__after_atomic();
        } else
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        goto out_locked;
                goto out_sleep;
        }
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        xprt->snd_task = task;

out_locked:
        trace_xprt_reserve_xprt(xprt, task);
        return 1;

out_unlock:
        xprt_clear_locked(xprt);
out_sleep:
        task->tk_status = -EAGAIN;
        if (RPC_IS_SOFT(task))
                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
                                xprt_request_timeout(req));
        else
                rpc_sleep_on(&xprt->sending, task, NULL);
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
        return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
        if (!list_empty(&xprt->xmit_queue)) {
                /* Peek at head of queue to see if it can make progress */
                if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
                                        rq_xmit)->rq_cong)
                        return;
        }
        set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
        if (!RPCXPRT_CONGESTED(xprt))
                clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        goto out_locked;
                goto out_sleep;
        }
        if (req == NULL) {
                xprt->snd_task = task;
                goto out_locked;
        }
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        if (!xprt_need_congestion_window_wait(xprt)) {
                xprt->snd_task = task;
                goto out_locked;
        }
out_unlock:
        xprt_clear_locked(xprt);
out_sleep:
        task->tk_status = -EAGAIN;
        if (RPC_IS_SOFT(task))
                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
                                xprt_request_timeout(req));
        else
                rpc_sleep_on(&xprt->sending, task, NULL);
        return 0;
out_locked:
        trace_xprt_reserve_cong(xprt, task);
        return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        int retval;

        if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
                return 1;
        spin_lock(&xprt->transport_lock);
        retval = xprt->ops->reserve_xprt(xprt, task);
        spin_unlock(&xprt->transport_lock);
        return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
        struct rpc_xprt *xprt = data;

        xprt->snd_task = task;
        return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
                                __xprt_lock_write_func, xprt))
                return;
out_unlock:
        xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        if (xprt_need_congestion_window_wait(xprt))
                goto out_unlock;
        if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
                                __xprt_lock_write_func, xprt))
                return;
out_unlock:
        xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
        trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
        trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task != task)
                return;
        spin_lock(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (req->rq_cong)
                return 1;
        trace_xprt_get_cong(xprt, req->rq_task);
        if (RPCXPRT_CONGESTED(xprt)) {
                xprt_set_congestion_window_wait(xprt);
                return 0;
        }
        req->rq_cong = 1;
        xprt->cong += RPC_CWNDSCALE;
        return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (!req->rq_cong)
                return;
        req->rq_cong = 0;
        xprt->cong -= RPC_CWNDSCALE;
        xprt_test_and_clear_congestion_window_wait(xprt);
        trace_xprt_put_cong(xprt, req->rq_task);
        __xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        bool ret = false;

        if (req->rq_cong)
                return true;
        spin_lock(&xprt->transport_lock);
        ret = __xprt_get_cong(xprt, req) != 0;
        spin_unlock(&xprt->transport_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        __xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
        if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
                __xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
        if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
                spin_lock(&xprt->transport_lock);
                __xprt_lock_write_next_cong(xprt);
                spin_unlock(&xprt->transport_lock);
        }
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *      -       a reply is received and
 *      -       a full number of requests are outstanding and
 *      -       the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long cwnd = xprt->cwnd;

        if (result >= 0 && cwnd <= xprt->cong) {
                /* The (cwnd >> 1) term makes sure
                 * the result gets rounded properly. */
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(xprt))
                        cwnd = RPC_MAXCWND(xprt);
                __xprt_lock_write_next_cong(xprt);
        } else if (result == -ETIMEDOUT) {
                cwnd >>= 1;
                if (cwnd < RPC_CWNDSCALE)
                        cwnd = RPC_CWNDSCALE;
        }
        dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
                        xprt->cong, xprt->cwnd, cwnd);
        xprt->cwnd = cwnd;
        __xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
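
/*
 * Worked example: with RPC_CWNDSCALE = 256, a window of two credits is
 * cwnd = 512, and a reply grows it by
 *
 *      (256 * 256 + (512 >> 1)) / 512 = (65536 + 256) / 512 = 128
 *
 * i.e. to 640, or half a credit; growth slows as the window widens
 * (additive increase), while a timeout halves cwnd (multiplicative
 * decrease), never dropping below one credit (RPC_CWNDSCALE).
 */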

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
        if (status < 0)
                rpc_wake_up_status(&xprt->pending, status);
        else
                rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
        set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
        if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
                __xprt_lock_write_next(xprt);
                dprintk("RPC:       write space: waking waiting task on "
                                "xprt %p\n", xprt);
                return true;
        }
        return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
        bool ret;

        if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
                return false;
        spin_lock(&xprt->transport_lock);
        ret = xprt_clear_write_space_locked(xprt);
        spin_unlock(&xprt->transport_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
        s64 delta = ktime_to_ns(ktime_get() - abstime);
        return likely(delta >= 0) ?
                jiffies - nsecs_to_jiffies(delta) :
                jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        unsigned long majortimeo = req->rq_timeout;

        if (to->to_exponential)
                majortimeo <<= to->to_retries;
        else
                majortimeo += to->to_increment * to->to_retries;
        if (majortimeo > to->to_maxval || majortimeo == 0)
                majortimeo = to->to_maxval;
        return majortimeo;
}
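
/*
 * For example (illustrative values), with rq_timeout = 5 * HZ and
 * to_retries = 3, the major timeout is (5 * HZ) << 3 = 40 seconds with
 * exponential backoff, or 5 + 3 * 5 = 20 seconds with linear backoff
 * and to_increment = 5 * HZ; either way the result is clamped to
 * to_maxval.
 */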

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
        req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
        unsigned long time_init;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (likely(xprt && xprt_connected(xprt)))
                time_init = jiffies;
        else
                time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
        req->rq_timeout = task->tk_client->cl_timeout->to_initval;
        req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        int status = 0;

        if (time_before(jiffies, req->rq_majortimeo)) {
                if (to->to_exponential)
                        req->rq_timeout <<= 1;
                else
                        req->rq_timeout += to->to_increment;
                if (to->to_maxval && req->rq_timeout >= to->to_maxval)
                        req->rq_timeout = to->to_maxval;
                req->rq_retries++;
        } else {
                req->rq_timeout = to->to_initval;
                req->rq_retries = 0;
                xprt_reset_majortimeo(req);
                /* Reset the RTT counters == "slow start" */
                spin_lock(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
                spin_unlock(&xprt->transport_lock);
                status = -ETIMEDOUT;
        }

        if (req->rq_timeout == 0) {
                printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
                req->rq_timeout = 5 * HZ;
        }
        return status;
}

static void xprt_autoclose(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);
        unsigned int pflags = memalloc_nofs_save();

        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        xprt->ops->close(xprt);
        xprt_release_write(xprt, NULL);
        wake_up_bit(&xprt->state, XPRT_LOCKED);
        memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
        dprintk("RPC:       disconnected transport %p\n", xprt);
        spin_lock(&xprt->transport_lock);
        xprt_clear_connected(xprt);
        xprt_clear_write_space_locked(xprt);
        xprt_clear_congestion_window_wait_locked(xprt);
        xprt_wake_pending_tasks(xprt, -ENOTCONN);
        spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock(&xprt->transport_lock);
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
        else if (xprt->snd_task)
                rpc_wake_up_queued_task_set_status(&xprt->pending,
                                xprt->snd_task, -ENOTCONN);
        spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
        return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
                !xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock(&xprt->transport_lock);
        if (cookie != xprt->connect_cookie)
                goto out;
        if (test_bit(XPRT_CLOSING, &xprt->state))
                goto out;
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
        spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
        return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
        __must_hold(&xprt->transport_lock)
{
        xprt->last_used = jiffies;
        if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
                mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
        struct rpc_xprt *xprt = from_timer(xprt, t, timer);

        if (!RB_EMPTY_ROOT(&xprt->recv_queue))
                return;
        /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
        xprt->last_used = jiffies;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
                struct rpc_task *task,
                void *cookie)
{
        bool ret = false;

        spin_lock(&xprt->transport_lock);
        if (!test_bit(XPRT_LOCKED, &xprt->state))
                goto out;
        if (xprt->snd_task != task)
                goto out;
        xprt->snd_task = cookie;
        ret = true;
out:
        spin_unlock(&xprt->transport_lock);
        return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
        spin_lock(&xprt->transport_lock);
        if (xprt->snd_task != cookie)
                goto out;
        if (!test_bit(XPRT_LOCKED, &xprt->state))
                goto out;
        xprt->snd_task = NULL;
        xprt->ops->release_xprt(xprt, NULL);
        xprt_schedule_autodisconnect(xprt);
out:
        spin_unlock(&xprt->transport_lock);
        wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

        dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
                        xprt, (xprt_connected(xprt) ? "is" : "is not"));

        if (!xprt_bound(xprt)) {
                task->tk_status = -EAGAIN;
                return;
        }
        if (!xprt_lock_write(xprt, task))
                return;

        if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
                xprt->ops->close(xprt);

        if (!xprt_connected(xprt)) {
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
                rpc_sleep_on_timeout(&xprt->pending, task, NULL,
                                xprt_request_timeout(task->tk_rqstp));

                if (test_bit(XPRT_CLOSING, &xprt->state))
                        return;
                if (xprt_test_and_set_connecting(xprt))
                        return;
                /* Race breaker */
                if (!xprt_connected(xprt)) {
                        xprt->stat.connect_start = jiffies;
                        xprt->ops->connect(xprt, task);
                } else {
                        xprt_clear_connecting(xprt);
                        task->tk_status = 0;
                        rpc_wake_up_queued_task(&xprt->pending, task);
                }
        }
        xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
        unsigned long start, now = jiffies;

        start = xprt->stat.connect_start + xprt->reestablish_timeout;
        if (time_after(start, now))
                return start - now;
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
        xprt->reestablish_timeout <<= 1;
        if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
                xprt->reestablish_timeout = xprt->max_reconnect_timeout;
        if (xprt->reestablish_timeout < init_to)
                xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
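
/*
 * For instance (illustrative values), starting from a reestablish
 * timeout of 3 seconds, the successive delays are 6, 12, 24, ...
 * seconds, clamped above by xprt->max_reconnect_timeout and below by
 * init_to.
 */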

enum xprt_xid_rb_cmp {
        XID_RB_EQUAL,
        XID_RB_LEFT,
        XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
        if (xid1 == xid2)
                return XID_RB_EQUAL;
        if ((__force u32)xid1 < (__force u32)xid2)
                return XID_RB_LEFT;
        return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
        struct rb_node *n = xprt->recv_queue.rb_node;
        struct rpc_rqst *req;

        while (n != NULL) {
                req = rb_entry(n, struct rpc_rqst, rq_recv);
                switch (xprt_xid_cmp(xid, req->rq_xid)) {
                case XID_RB_LEFT:
                        n = n->rb_left;
                        break;
                case XID_RB_RIGHT:
                        n = n->rb_right;
                        break;
                case XID_RB_EQUAL:
                        return req;
                }
        }
        return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
        struct rb_node **p = &xprt->recv_queue.rb_node;
        struct rb_node *n = NULL;
        struct rpc_rqst *req;

        while (*p != NULL) {
                n = *p;
                req = rb_entry(n, struct rpc_rqst, rq_recv);
                switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
                case XID_RB_LEFT:
                        p = &n->rb_left;
                        break;
                case XID_RB_RIGHT:
                        p = &n->rb_right;
                        break;
                case XID_RB_EQUAL:
                        WARN_ON_ONCE(new != req);
                        return;
                }
        }
        rb_link_node(&new->rq_recv, n, p);
        rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *entry;

        entry = xprt_request_rb_find(xprt, xid);
        if (entry != NULL) {
                trace_xprt_lookup_rqst(xprt, xid, 0);
                entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
                return entry;
        }

        dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
                        ntohl(xid));
        trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
        xprt->stat.bad_xids++;
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
        return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
        atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
        if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
                atomic_dec(&req->rq_pin);
                return;
        }
        if (atomic_dec_and_test(&req->rq_pin))
                wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
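
/*
 * Together with xprt_lookup_rqst(), pinning lets a transport's receive
 * path drop xprt->queue_lock while it copies reply data, without the
 * request vanishing underneath it. A sketch of the usual pattern (the
 * copy helper is hypothetical):
 *
 *      spin_lock(&xprt->queue_lock);
 *      req = xprt_lookup_rqst(xprt, xid);
 *      if (req) {
 *              xprt_pin_rqst(req);
 *              spin_unlock(&xprt->queue_lock);
 *
 *              copied = example_copy_reply_data(req);
 *
 *              spin_lock(&xprt->queue_lock);
 *              xprt_complete_rqst(req->rq_task, copied);
 *              xprt_unpin_rqst(req);
 *      }
 *      spin_unlock(&xprt->queue_lock);
 */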

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
        wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
        return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
                READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
        return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
                READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (!xprt_request_need_enqueue_receive(task, req))
                return;

        xprt_request_prepare(task->tk_rqstp);
        spin_lock(&xprt->queue_lock);

        /* Update the softirq receive buffer */
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                        sizeof(req->rq_private_buf));

        /* Add request to the receive list */
        xprt_request_rb_insert(xprt, req);
        set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
        spin_unlock(&xprt->queue_lock);

        /* Turn off autodisconnect */
        del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
                xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
        unsigned int timer = task->tk_msg.rpc_proc->p_timer;
        long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

        if (timer) {
                if (req->rq_ntrans == 1)
                        rpc_update_rtt(rtt, timer, m);
                rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
        }
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

        xprt->stat.recvs++;

        req->rq_private_buf.len = copied;
        /* Ensure all writes are done before we update
         * req->rq_reply_bytes_recvd.
         */
        smp_wmb();
        req->rq_reply_bytes_recvd = copied;
        xprt_request_dequeue_receive_locked(task);
        rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (task->tk_status != -ETIMEDOUT)
                return;

        trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
        if (!req->rq_reply_bytes_recvd) {
                if (xprt->ops->timer)
                        xprt->ops->timer(xprt, task);
        } else
                task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
                        xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
        int timer = task->tk_msg.rpc_proc->p_timer;
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rtt *rtt = clnt->cl_rtt;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long max_timeout = clnt->cl_timeout->to_maxval;
        unsigned long timeout;

        timeout = rpc_calc_rto(rtt, timer);
        timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
        if (timeout > max_timeout || timeout == 0)
                timeout = max_timeout;
        rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
                        jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
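
/*
 * Worked example (illustrative numbers): if rpc_calc_rto() estimates a
 * 2 second RTO, the procedure has timed out once before
 * (rpc_ntimeo() == 1) and the request has already been retried once
 * (rq_retries == 1), the task sleeps for 2 << (1 + 1) = 8 seconds,
 * clamped to the client's to_maxval.
 */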

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
                return;
        /*
         * Sleep on the pending queue if we're expecting a reply.
         * The spinlock ensures atomicity between the test of
         * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
         */
        spin_lock(&xprt->queue_lock);
        if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
                xprt->ops->wait_for_reply_request(task);
                /*
                 * Send an extra queue wakeup call if the
                 * connection was dropped in case the call to
                 * rpc_sleep_on() raced.
                 */
                if (xprt_request_retransmit_after_disconnect(task))
                        rpc_wake_up_queued_task_set_status(&xprt->pending,
                                        task, -ENOTCONN);
        }
        spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
        return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
        struct rpc_rqst *pos, *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (xprt_request_need_enqueue_transmit(task, req)) {
                req->rq_bytes_sent = 0;
                spin_lock(&xprt->queue_lock);
                /*
                 * Requests that carry congestion control credits are added
                 * to the head of the list to avoid starvation issues.
                 */
                if (req->rq_cong) {
                        xprt_clear_congestion_window_wait(xprt);
                        list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
                                if (pos->rq_cong)
                                        continue;
                                /* Note: req is added _before_ pos */
                                list_add_tail(&req->rq_xmit, &pos->rq_xmit);
                                INIT_LIST_HEAD(&req->rq_xmit2);
                                trace_xprt_enq_xmit(task, 1);
                                goto out;
                        }
                } else if (RPC_IS_SWAPPER(task)) {
                        list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
                                if (pos->rq_cong || pos->rq_bytes_sent)
                                        continue;
                                if (RPC_IS_SWAPPER(pos->rq_task))
                                        continue;
                                /* Note: req is added _before_ pos */
                                list_add_tail(&req->rq_xmit, &pos->rq_xmit);
                                INIT_LIST_HEAD(&req->rq_xmit2);
                                trace_xprt_enq_xmit(task, 2);
                                goto out;
                        }
                } else if (!req->rq_seqno) {
                        list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
                                if (pos->rq_task->tk_owner != task->tk_owner)
                                        continue;
                                list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
                                INIT_LIST_HEAD(&req->rq_xmit);
                                trace_xprt_enq_xmit(task, 3);
                                goto out;
                        }
                }
                list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
                INIT_LIST_HEAD(&req->rq_xmit2);
                trace_xprt_enq_xmit(task, 4);
out:
                set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
                spin_unlock(&xprt->queue_lock);
        }
}
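
/*
 * To summarize the queueing policy above: a request already holding
 * congestion credits is inserted ahead of those still waiting for
 * credits (trace point 1), a swapper task ahead of unsent non-swapper
 * requests (trace point 2), a request without an rq_seqno is chained on
 * rq_xmit2 behind an earlier request from the same owner so the two
 * transmit back to back (trace point 3), and everything else is
 * appended to the tail (trace point 4).
 */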

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
                return;
        if (!list_empty(&req->rq_xmit)) {
                list_del(&req->rq_xmit);
                if (!list_empty(&req->rq_xmit2)) {
                        struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
                                        struct rpc_rqst, rq_xmit2);
                        list_del(&req->rq_xmit2);
                        list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
                }
        } else
                list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        spin_lock(&xprt->queue_lock);
        xprt_request_dequeue_transmit_locked(task);
        spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
            test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
            xprt_is_pinned_rqst(req)) {
                spin_lock(&xprt->queue_lock);
                xprt_request_dequeue_transmit_locked(task);
                xprt_request_dequeue_receive_locked(task);
                while (xprt_is_pinned_rqst(req)) {
                        set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
                        spin_unlock(&xprt->queue_lock);
                        xprt_wait_on_pinned_rqst(req);
                        spin_lock(&xprt->queue_lock);
                        clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
                }
                spin_unlock(&xprt->queue_lock);
        }
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        if (xprt->ops->prepare_request)
                xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
        return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

        if (!xprt_lock_write(xprt, task)) {
                /* Race breaker: someone else may already have transmitted
                 * this request.
                 */
                if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
                        rpc_wake_up_queued_task_set_status(&xprt->sending,
                                        task, 0);
                return false;
        }
        return true;
}
1415
1416 void xprt_end_transmit(struct rpc_task *task)
1417 {
1418         xprt_release_write(task->tk_rqstp->rq_xprt, task);
1419 }
1420
1421 /**
1422  * xprt_request_transmit - send an RPC request on a transport
1423  * @req: pointer to request to transmit
1424  * @snd_task: RPC task that owns the transport lock
1425  *
1426  * This performs the transmission of a single request.
1427  * Note that if the request is not the same as snd_task, then it
1428  * does need to be pinned.
1429  * Returns '0' on success.
1430  */
1431 static int
1432 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1433 {
1434         struct rpc_xprt *xprt = req->rq_xprt;
1435         struct rpc_task *task = req->rq_task;
1436         unsigned int connect_cookie;
1437         int is_retrans = RPC_WAS_SENT(task);
1438         int status;
1439
1440         if (!req->rq_bytes_sent) {
1441                 if (xprt_request_data_received(task)) {
1442                         status = 0;
1443                         goto out_dequeue;
1444                 }
1445                 /* Verify that our message lies in the RPCSEC_GSS window */
1446                 if (rpcauth_xmit_need_reencode(task)) {
1447                         status = -EBADMSG;
1448                         goto out_dequeue;
1449                 }
1450                 if (RPC_SIGNALLED(task)) {
1451                         status = -ERESTARTSYS;
1452                         goto out_dequeue;
1453                 }
1454         }
1455
1456         /*
1457          * Update req->rq_ntrans before transmitting to avoid races with
1458          * xprt_update_rtt(), which needs to know that it is recording a
1459          * reply to the first transmission.
1460          */
1461         req->rq_ntrans++;
1462
1463         trace_xprt_sendto(&req->rq_snd_buf);
1464         connect_cookie = xprt->connect_cookie;
1465         status = xprt->ops->send_request(req);
1466         if (status != 0) {
1467                 req->rq_ntrans--;
1468                 trace_xprt_transmit(req, status);
1469                 return status;
1470         }
1471
1472         if (is_retrans)
1473                 task->tk_client->cl_stats->rpcretrans++;
1474
1475         xprt_inject_disconnect(xprt);
1476
1477         task->tk_flags |= RPC_TASK_SENT;
1478         spin_lock(&xprt->transport_lock);
1479
1480         xprt->stat.sends++;
1481         xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1482         xprt->stat.bklog_u += xprt->backlog.qlen;
1483         xprt->stat.sending_u += xprt->sending.qlen;
1484         xprt->stat.pending_u += xprt->pending.qlen;
1485         spin_unlock(&xprt->transport_lock);
1486
1487         req->rq_connect_cookie = connect_cookie;
1488 out_dequeue:
1489         trace_xprt_transmit(req, status);
1490         xprt_request_dequeue_transmit(task);
1491         rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1492         return status;
1493 }
1494
1495 /**
1496  * xprt_transmit - send an RPC request on a transport
1497  * @task: controlling RPC task
1498  *
1499  * Attempts to drain the transmit queue. On exit, either the transport
1500  * signalled an error that needs to be handled before transmission can
1501  * resume, or @task finished transmitting, and detected that it already
1502  * received a reply.
1503  */
1504 void
1505 xprt_transmit(struct rpc_task *task)
1506 {
1507         struct rpc_rqst *next, *req = task->tk_rqstp;
1508         struct rpc_xprt *xprt = req->rq_xprt;
1509         int status;
1510
1511         spin_lock(&xprt->queue_lock);
1512         while (!list_empty(&xprt->xmit_queue)) {
1513                 next = list_first_entry(&xprt->xmit_queue,
1514                                 struct rpc_rqst, rq_xmit);
1515                 xprt_pin_rqst(next);
1516                 spin_unlock(&xprt->queue_lock);
1517                 status = xprt_request_transmit(next, task);
1518                 if (status == -EBADMSG && next != req)
1519                         status = 0;
1520                 cond_resched();
1521                 spin_lock(&xprt->queue_lock);
1522                 xprt_unpin_rqst(next);
1523                 if (status == 0) {
1524                         if (!xprt_request_data_received(task) ||
1525                             test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1526                                 continue;
1527                 } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1528                         task->tk_status = status;
1529                 break;
1530         }
1531         spin_unlock(&xprt->queue_lock);
1532 }
1533
1534 static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1535 {
1536         set_bit(XPRT_CONGESTED, &xprt->state);
1537         rpc_sleep_on(&xprt->backlog, task, NULL);
1538 }
1539
1540 static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
1541 {
1542         if (rpc_wake_up_next(&xprt->backlog) == NULL)
1543                 clear_bit(XPRT_CONGESTED, &xprt->state);
1544 }
1545
1546 static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
1547 {
1548         bool ret = false;
1549
1550         if (!test_bit(XPRT_CONGESTED, &xprt->state))
1551                 goto out;
1552         spin_lock(&xprt->reserve_lock);
1553         if (test_bit(XPRT_CONGESTED, &xprt->state)) {
1554                 rpc_sleep_on(&xprt->backlog, task, NULL);
1555                 ret = true;
1556         }
1557         spin_unlock(&xprt->reserve_lock);
1558 out:
1559         return ret;
1560 }
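
/*
 * Note: xprt_throttle_congested() uses the common "check lazily,
 * re-check under the lock" pattern.  The unlocked test_bit() only
 * avoids taking reserve_lock on the fast path; the test must be
 * repeated under the lock because XPRT_CONGESTED may be cleared
 * between the two checks.  Reduced to a skeleton (illustrative only):
 *
 *	if (!test_bit(FLAG, &state))		unlocked fast path
 *		return false;
 *	spin_lock(&lock);
 *	if (test_bit(FLAG, &state)) {		authoritative re-check
 *		rpc_sleep_on(&queue, task, NULL);
 *		ret = true;
 *	}
 *	spin_unlock(&lock);
 */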
1561
1562 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
1563 {
1564         struct rpc_rqst *req = ERR_PTR(-EAGAIN);
1565
1566         if (xprt->num_reqs >= xprt->max_reqs)
1567                 goto out;
1568         ++xprt->num_reqs;
1569         spin_unlock(&xprt->reserve_lock);
1570         req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
1571         spin_lock(&xprt->reserve_lock);
1572         if (req != NULL)
1573                 goto out;
1574         --xprt->num_reqs;
1575         req = ERR_PTR(-ENOMEM);
1576 out:
1577         return req;
1578 }
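
/*
 * Note on xprt_dynamic_alloc_slot(): num_reqs is incremented *before*
 * reserve_lock is dropped, so the slot quota stays reserved across the
 * (possibly sleeping) kzalloc(); otherwise a concurrent caller could
 * push num_reqs past max_reqs.  On failure the reservation is rolled
 * back under the lock.  Skeleton (illustrative only):
 *
 *	++xprt->num_reqs;			reserve quota while locked
 *	spin_unlock(&xprt->reserve_lock);
 *	req = kzalloc(...);			may sleep
 *	spin_lock(&xprt->reserve_lock);
 *	if (!req)
 *		--xprt->num_reqs;		roll the reservation back
 */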
1579
1580 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1581 {
1582         if (xprt->num_reqs > xprt->min_reqs) {
1583                 --xprt->num_reqs;
1584                 kfree(req);
1585                 return true;
1586         }
1587         return false;
1588 }
1589
1590 void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
1591 {
1592         struct rpc_rqst *req;
1593
1594         spin_lock(&xprt->reserve_lock);
1595         if (!list_empty(&xprt->free)) {
1596                 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
1597                 list_del(&req->rq_list);
1598                 goto out_init_req;
1599         }
1600         req = xprt_dynamic_alloc_slot(xprt);
1601         if (!IS_ERR(req))
1602                 goto out_init_req;
1603         switch (PTR_ERR(req)) {
1604         case -ENOMEM:
1605                 dprintk("RPC:       dynamic allocation of request slot "
1606                                 "failed! Retrying\n");
1607                 task->tk_status = -ENOMEM;
1608                 break;
1609         case -EAGAIN:
1610                 xprt_add_backlog(xprt, task);
1611                 dprintk("RPC:       waiting for request slot\n");
1612                 fallthrough;
1613         default:
1614                 task->tk_status = -EAGAIN;
1615         }
1616         spin_unlock(&xprt->reserve_lock);
1617         return;
1618 out_init_req:
1619         xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
1620                                      xprt->num_reqs);
1621         spin_unlock(&xprt->reserve_lock);
1622
1623         task->tk_status = 0;
1624         task->tk_rqstp = req;
1625 }
1626 EXPORT_SYMBOL_GPL(xprt_alloc_slot);
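
/*
 * Example: xprt_alloc_slot() and xprt_free_slot() are exported so
 * transport modules can plug them straight into their ops table.  A
 * hypothetical transport might wire them up like this (sketch; field
 * subset only):
 *
 *	static const struct rpc_xprt_ops example_ops = {
 *		.alloc_slot	= xprt_alloc_slot,
 *		.free_slot	= xprt_free_slot,
 *		...
 *	};
 */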
1627
1628 void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
1629 {
1630         spin_lock(&xprt->reserve_lock);
1631         if (!xprt_dynamic_free_slot(xprt, req)) {
1632                 memset(req, 0, sizeof(*req));   /* mark unused */
1633                 list_add(&req->rq_list, &xprt->free);
1634         }
1635         xprt_wake_up_backlog(xprt);
1636         spin_unlock(&xprt->reserve_lock);
1637 }
1638 EXPORT_SYMBOL_GPL(xprt_free_slot);
1639
1640 static void xprt_free_all_slots(struct rpc_xprt *xprt)
1641 {
1642         struct rpc_rqst *req;
1643         while (!list_empty(&xprt->free)) {
1644                 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
1645                 list_del(&req->rq_list);
1646                 kfree(req);
1647         }
1648 }
1649
1650 struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
1651                 unsigned int num_prealloc,
1652                 unsigned int max_alloc)
1653 {
1654         struct rpc_xprt *xprt;
1655         struct rpc_rqst *req;
1656         int i;
1657
1658         xprt = kzalloc(size, GFP_KERNEL);
1659         if (xprt == NULL)
1660                 goto out;
1661
1662         xprt_init(xprt, net);
1663
1664         for (i = 0; i < num_prealloc; i++) {
1665                 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
1666                 if (!req)
1667                         goto out_free;
1668                 list_add(&req->rq_list, &xprt->free);
1669         }
1670         if (max_alloc > num_prealloc)
1671                 xprt->max_reqs = max_alloc;
1672         else
1673                 xprt->max_reqs = num_prealloc;
1674         xprt->min_reqs = num_prealloc;
1675         xprt->num_reqs = num_prealloc;
1676
1677         return xprt;
1678
1679 out_free:
1680         xprt_free(xprt);
1681 out:
1682         return NULL;
1683 }
1684 EXPORT_SYMBOL_GPL(xprt_alloc);
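
/*
 * Example: a transport setup routine typically embeds struct rpc_xprt
 * at the start of its own private structure and sizes the slot table
 * via xprt_alloc().  Sketch only; "example_xprt" and the slot table
 * constants chosen here are illustrative:
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;		must be first
 *		...
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  RPC_DEF_SLOT_TABLE, RPC_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */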
1685
1686 void xprt_free(struct rpc_xprt *xprt)
1687 {
1688         put_net(xprt->xprt_net);
1689         xprt_free_all_slots(xprt);
1690         kfree_rcu(xprt, rcu);
1691 }
1692 EXPORT_SYMBOL_GPL(xprt_free);
1693
1694 static void
1695 xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1696 {
1697         req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1698 }
1699
1700 static __be32
1701 xprt_alloc_xid(struct rpc_xprt *xprt)
1702 {
1703         __be32 xid;
1704
1705         spin_lock(&xprt->reserve_lock);
1706         xid = (__force __be32)xprt->xid++;
1707         spin_unlock(&xprt->reserve_lock);
1708         return xid;
1709 }
1710
1711 static void
1712 xprt_init_xid(struct rpc_xprt *xprt)
1713 {
1714         xprt->xid = prandom_u32();
1715 }
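
/*
 * Worked example: XIDs are a counter seeded with a random value, so
 * successive requests on a transport simply get consecutive XIDs.
 * With an (illustrative) seed near the top of the range:
 *
 *	xprt_init_xid():	xprt->xid = 0xfffffffe
 *	1st request:		rq_xid = 0xfffffffe
 *	2nd request:		rq_xid = 0xffffffff
 *	3rd request:		rq_xid = 0x00000000	(wraps, still fine)
 *
 * Wraparound is harmless because replies are matched by exact XID
 * lookup, not by ordering.
 */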
1716
1717 static void
1718 xprt_request_init(struct rpc_task *task)
1719 {
1720         struct rpc_xprt *xprt = task->tk_xprt;
1721         struct rpc_rqst *req = task->tk_rqstp;
1722
1723         req->rq_task    = task;
1724         req->rq_xprt    = xprt;
1725         req->rq_buffer  = NULL;
1726         req->rq_xid     = xprt_alloc_xid(xprt);
1727         xprt_init_connect_cookie(req, xprt);
1728         req->rq_snd_buf.len = 0;
1729         req->rq_snd_buf.buflen = 0;
1730         req->rq_rcv_buf.len = 0;
1731         req->rq_rcv_buf.buflen = 0;
1732         req->rq_snd_buf.bvec = NULL;
1733         req->rq_rcv_buf.bvec = NULL;
1734         req->rq_release_snd_buf = NULL;
1735         xprt_init_majortimeo(task, req);
1736         dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
1737                         req, ntohl(req->rq_xid));
1738 }
1739
1740 static void
1741 xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1742 {
1743         xprt->ops->alloc_slot(xprt, task);
1744         if (task->tk_rqstp != NULL)
1745                 xprt_request_init(task);
1746 }
1747
1748 /**
1749  * xprt_reserve - allocate an RPC request slot
1750  * @task: RPC task requesting a slot allocation
1751  *
1752  * If the transport is marked as being congested, or if no more
1753  * slots are available, place the task on the transport's
1754  * backlog queue.
1755  */
1756 void xprt_reserve(struct rpc_task *task)
1757 {
1758         struct rpc_xprt *xprt = task->tk_xprt;
1759
1760         task->tk_status = 0;
1761         if (task->tk_rqstp != NULL)
1762                 return;
1763
1764         task->tk_status = -EAGAIN;
1765         if (!xprt_throttle_congested(xprt, task))
1766                 xprt_do_reserve(xprt, task);
1767 }
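
/*
 * Note: callers are expected to re-drive the reservation on -EAGAIN.
 * In the RPC client state machine the step looks roughly like this
 * (see call_reserve()/call_reserveresult() in net/sunrpc/clnt.c;
 * simplified sketch):
 *
 *	task->tk_action = call_reserveresult;
 *	xprt_reserve(task);
 *
 * On -EAGAIN the task has been put to sleep on xprt->backlog and the
 * reserve step is retried once a slot is freed.
 */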
1768
1769 /**
1770  * xprt_retry_reserve - allocate an RPC request slot
1771  * @task: RPC task requesting a slot allocation
1772  *
1773  * If no more slots are available, place the task on the transport's
1774  * backlog queue.
1775  * Note that the only difference from xprt_reserve is that this function
1776  * ignores the value of the XPRT_CONGESTED flag.
1777  */
1778 void xprt_retry_reserve(struct rpc_task *task)
1779 {
1780         struct rpc_xprt *xprt = task->tk_xprt;
1781
1782         task->tk_status = 0;
1783         if (task->tk_rqstp != NULL)
1784                 return;
1785
1786         task->tk_status = -EAGAIN;
1787         xprt_do_reserve(xprt, task);
1788 }
1789
1790 /**
1791  * xprt_release - release an RPC request slot
1792  * @task: task which is finished with the slot
1793  *
1794  */
1795 void xprt_release(struct rpc_task *task)
1796 {
1797         struct rpc_xprt *xprt;
1798         struct rpc_rqst *req = task->tk_rqstp;
1799
1800         if (req == NULL) {
1801                 if (task->tk_client) {
1802                         xprt = task->tk_xprt;
1803                         xprt_release_write(xprt, task);
1804                 }
1805                 return;
1806         }
1807
1808         xprt = req->rq_xprt;
1809         xprt_request_dequeue_xprt(task);
1810         spin_lock(&xprt->transport_lock);
1811         xprt->ops->release_xprt(xprt, task);
1812         if (xprt->ops->release_request)
1813                 xprt->ops->release_request(task);
1814         xprt_schedule_autodisconnect(xprt);
1815         spin_unlock(&xprt->transport_lock);
1816         if (req->rq_buffer)
1817                 xprt->ops->buf_free(task);
1818         xprt_inject_disconnect(xprt);
1819         xdr_free_bvec(&req->rq_rcv_buf);
1820         xdr_free_bvec(&req->rq_snd_buf);
1821         if (req->rq_cred != NULL)
1822                 put_rpccred(req->rq_cred);
1823         task->tk_rqstp = NULL;
1824         if (req->rq_release_snd_buf)
1825                 req->rq_release_snd_buf(req);
1826
1827         dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1828         if (likely(!bc_prealloc(req)))
1829                 xprt->ops->free_slot(xprt, req);
1830         else
1831                 xprt_free_bc_request(req);
1832 }
1833
1834 #ifdef CONFIG_SUNRPC_BACKCHANNEL
1835 void
1836 xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
1837 {
1838         struct xdr_buf *xbufp = &req->rq_snd_buf;
1839
1840         task->tk_rqstp = req;
1841         req->rq_task = task;
1842         xprt_init_connect_cookie(req, req->rq_xprt);
1843         /*
1844          * Set up the xdr_buf length.
1845          * This also indicates that the buffer is XDR encoded already.
1846          */
1847         xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
1848                 xbufp->tail[0].iov_len;
1849 }
1850 #endif
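
/*
 * Worked example for the length computation in xprt_init_bc_request():
 * with, say, a 20-byte head, one page of payload and an empty tail
 * (numbers illustrative only),
 *
 *	xbufp->len = 20 + 4096 + 0 = 4116
 *
 * i.e. the total is simply head[0] + page_len + tail[0], which also
 * marks the buffer as already XDR-encoded.
 */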
1851
1852 static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1853 {
1854         kref_init(&xprt->kref);
1855
1856         spin_lock_init(&xprt->transport_lock);
1857         spin_lock_init(&xprt->reserve_lock);
1858         spin_lock_init(&xprt->queue_lock);
1859
1860         INIT_LIST_HEAD(&xprt->free);
1861         xprt->recv_queue = RB_ROOT;
1862         INIT_LIST_HEAD(&xprt->xmit_queue);
1863 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1864         spin_lock_init(&xprt->bc_pa_lock);
1865         INIT_LIST_HEAD(&xprt->bc_pa_list);
1866 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1867         INIT_LIST_HEAD(&xprt->xprt_switch);
1868
1869         xprt->last_used = jiffies;
1870         xprt->cwnd = RPC_INITCWND;
1871         xprt->bind_index = 0;
1872
1873         rpc_init_wait_queue(&xprt->binding, "xprt_binding");
1874         rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1875         rpc_init_wait_queue(&xprt->sending, "xprt_sending");
1876         rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
1877
1878         xprt_init_xid(xprt);
1879
1880         xprt->xprt_net = get_net(net);
1881 }
1882
1883 /**
1884  * xprt_create_transport - create an RPC transport
1885  * @args: rpc transport creation arguments
1886  *
1887  */
1888 struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
1889 {
1890         struct rpc_xprt *xprt;
1891         struct xprt_class *t;
1892
1893         spin_lock(&xprt_list_lock);
1894         list_for_each_entry(t, &xprt_list, list) {
1895                 if (t->ident == args->ident) {
1896                         spin_unlock(&xprt_list_lock);
1897                         goto found;
1898                 }
1899         }
1900         spin_unlock(&xprt_list_lock);
1901         dprintk("RPC: transport (%d) not supported\n", args->ident);
1902         return ERR_PTR(-EIO);
1903
1904 found:
1905         xprt = t->setup(args);
1906         if (IS_ERR(xprt)) {
1907                 dprintk("RPC:       xprt_create_transport: failed, %ld\n",
1908                                 -PTR_ERR(xprt));
1909                 goto out;
1910         }
1911         if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
1912                 xprt->idle_timeout = 0;
1913         INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
1914         if (xprt_has_timer(xprt))
1915                 timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
1916         else
1917                 timer_setup(&xprt->timer, NULL, 0);
1918
1919         if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
1920                 xprt_destroy(xprt);
1921                 return ERR_PTR(-EINVAL);
1922         }
1923         xprt->servername = kstrdup(args->servername, GFP_KERNEL);
1924         if (xprt->servername == NULL) {
1925                 xprt_destroy(xprt);
1926                 return ERR_PTR(-ENOMEM);
1927         }
1928
1929         rpc_xprt_debugfs_register(xprt);
1930
1931         dprintk("RPC:       created transport %p with %u slots\n", xprt,
1932                         xprt->max_reqs);
1933 out:
1934         return xprt;
1935 }
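
/*
 * Example: a caller fills in struct xprt_create and requests a
 * registered transport by its ident.  Sketch (field subset; the
 * address and server name are placeholders):
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *		.servername	= "server.example.com",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 */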
1936
1937 static void xprt_destroy_cb(struct work_struct *work)
1938 {
1939         struct rpc_xprt *xprt =
1940                 container_of(work, struct rpc_xprt, task_cleanup);
1941
1942         rpc_xprt_debugfs_unregister(xprt);
1943         rpc_destroy_wait_queue(&xprt->binding);
1944         rpc_destroy_wait_queue(&xprt->pending);
1945         rpc_destroy_wait_queue(&xprt->sending);
1946         rpc_destroy_wait_queue(&xprt->backlog);
1947         kfree(xprt->servername);
1948         /*
1949          * Destroy any existing back channel
1950          */
1951         xprt_destroy_backchannel(xprt, UINT_MAX);
1952
1953         /*
1954          * Tear down transport state and free the rpc_xprt
1955          */
1956         xprt->ops->destroy(xprt);
1957 }
1958
1959 /**
1960  * xprt_destroy - destroy an RPC transport, killing off all requests.
1961  * @xprt: transport to destroy
1962  *
1963  */
1964 static void xprt_destroy(struct rpc_xprt *xprt)
1965 {
1966         dprintk("RPC:       destroying transport %p\n", xprt);
1967
1968         /*
1969          * Exclude transport connect/disconnect handlers and autoclose
1970          */
1971         wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
1972
1973         del_timer_sync(&xprt->timer);
1974
1975         /*
1976          * Destroy sockets etc from the system workqueue so they can
1977          * safely flush receive work running on rpciod.
1978          */
1979         INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
1980         schedule_work(&xprt->task_cleanup);
1981 }
1982
1983 static void xprt_destroy_kref(struct kref *kref)
1984 {
1985         xprt_destroy(container_of(kref, struct rpc_xprt, kref));
1986 }
1987
1988 /**
1989  * xprt_get - return a reference to an RPC transport.
1990  * @xprt: pointer to the transport
1991  *
1992  */
1993 struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
1994 {
1995         if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
1996                 return xprt;
1997         return NULL;
1998 }
1999 EXPORT_SYMBOL_GPL(xprt_get);
2000
2001 /**
2002  * xprt_put - release a reference to an RPC transport.
2003  * @xprt: pointer to the transport
2004  *
2005  */
2006 void xprt_put(struct rpc_xprt *xprt)
2007 {
2008         if (xprt != NULL)
2009                 kref_put(&xprt->kref, xprt_destroy_kref);
2010 }
2011 EXPORT_SYMBOL_GPL(xprt_put);
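
/*
 * Usage note: xprt_get()/xprt_put() follow the usual kref pattern.
 * xprt_get() can fail (return NULL) once the last reference has been
 * dropped, so take-and-test before use:
 *
 *	struct rpc_xprt *x = xprt_get(candidate);
 *
 *	if (x != NULL) {
 *		... use x safely ...
 *		xprt_put(x);
 *	}
 */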