/*
 *  linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY         RPCDBG_SCHED
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE      (2048)
#define RPC_BUFFER_POOLSIZE     (8)
#define RPC_TASK_POOLSIZE       (8)
static struct kmem_cache        *rpc_task_slabp __read_mostly;
static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
static mempool_t        *rpc_task_mempool __read_mostly;
static mempool_t        *rpc_buffer_mempool __read_mostly;

static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static struct rpc_wait_queue delay_queue;

/*
 * rpciod-related stuff
 */
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;

/*
 * Disable the timer for a given RPC task. Must be called with
 * queue->lock held and bottom halves disabled, in order to avoid
 * races within rpc_run_timer().
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (task->tk_timeout == 0)
                return;
        dprintk("RPC: %5u disabling timer\n", task->tk_pid);
        task->tk_timeout = 0;
        list_del(&task->u.tk_wait.timer_list);
        if (list_empty(&queue->timer_list.list))
                del_timer(&queue->timer_list.timer);
}

static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
        queue->timer_list.expires = expires;
        mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (!task->tk_timeout)
                return;

        dprintk("RPC: %5u setting alarm for %u ms\n",
                        task->tk_pid, jiffies_to_msecs(task->tk_timeout));

        task->u.tk_wait.expires = jiffies + task->tk_timeout;
        if (list_empty(&queue->timer_list.list) ||
            time_before(task->u.tk_wait.expires, queue->timer_list.expires))
                rpc_set_queue_timer(queue, task->u.tk_wait.expires);
        list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

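/*
 * Note: the two helpers above implement a single coalesced timer per
 * wait queue rather than one kernel timer per sleeping task.  Each
 * waiter records its own deadline in u.tk_wait.expires, and the
 * queue's timer is only (re)armed when a new deadline is sooner than
 * the one currently programmed.  Illustrative sketch only:
 *
 *        task A sleeps, expires = jiffies + 2*HZ   -> timer armed for A
 *        task B sleeps, expires = jiffies + 1*HZ   -> timer re-armed for B
 *        task C sleeps, expires = jiffies + 5*HZ   -> timer left alone
 *
 * __rpc_queue_timer_fn() further below walks timer_list.list, times
 * out whatever is due, and re-arms the timer for the next-soonest
 * remaining waiter.
 */
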
static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
{
        struct list_head *q = &queue->tasks[queue->priority];
        struct rpc_task *task;

        if (!list_empty(q)) {
                task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
                if (task->tk_owner == queue->owner)
                        list_move_tail(&task->u.tk_wait.list, q);
        }
}

static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
        if (queue->priority != priority) {
                /* Fairness: rotate the list when changing priority */
                rpc_rotate_queue_owner(queue);
                queue->priority = priority;
        }
}

static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
        queue->owner = pid;
        queue->nr = RPC_BATCH_COUNT;
}

static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
        rpc_set_waitqueue_priority(queue, queue->maxpriority);
        rpc_set_waitqueue_owner(queue, 0);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
                struct rpc_task *task,
                unsigned char queue_priority)
{
        struct list_head *q;
        struct rpc_task *t;

        INIT_LIST_HEAD(&task->u.tk_wait.links);
        if (unlikely(queue_priority > queue->maxpriority))
                queue_priority = queue->maxpriority;
        if (queue_priority > queue->priority)
                rpc_set_waitqueue_priority(queue, queue_priority);
        q = &queue->tasks[queue_priority];
        list_for_each_entry(t, q, u.tk_wait.list) {
                if (t->tk_owner == task->tk_owner) {
                        list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
                        return;
                }
        }
        list_add_tail(&task->u.tk_wait.list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
                struct rpc_task *task,
                unsigned char queue_priority)
{
        WARN_ON_ONCE(RPC_IS_QUEUED(task));
        if (RPC_IS_QUEUED(task))
                return;

        if (RPC_IS_PRIORITY(queue))
                __rpc_add_wait_queue_priority(queue, task, queue_priority);
        else if (RPC_IS_SWAPPER(task))
                list_add(&task->u.tk_wait.list, &queue->tasks[0]);
        else
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->tk_waitqueue = queue;
        queue->qlen++;
        /* barrier matches the read in rpc_wake_up_task_queue_locked() */
        smp_wmb();
        rpc_set_queued(task);

        dprintk("RPC: %5u added to queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

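/*
 * Note on the queue layout: a priority queue keeps one list per
 * priority level in queue->tasks[], and within a level the tasks of a
 * single owner are chained off the first task's u.tk_wait.links list.
 * Illustrative picture only (owners are hypothetical):
 *
 *        tasks[1] -> [task A, owner 10] -> [task C, owner 11]
 *                        |
 *                     links: [task B, owner 10]
 *
 * This lets the scheduler batch wakeups per owner while still walking
 * owners in FIFO order at each priority level; the removal helper
 * below promotes the next linked task when the list head goes away.
 */
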
/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
        struct rpc_task *t;

        if (!list_empty(&task->u.tk_wait.links)) {
                t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
                list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
                list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
        }
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        __rpc_disable_timer(queue, task);
        if (RPC_IS_PRIORITY(queue))
                __rpc_remove_wait_queue_priority(task);
        list_del(&task->u.tk_wait.list);
        queue->qlen--;
        dprintk("RPC: %5u removed from queue %p \"%s\"\n",
                        task->tk_pid, queue, rpc_qname(queue));
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
        int i;

        spin_lock_init(&queue->lock);
        for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
                INIT_LIST_HEAD(&queue->tasks[i]);
        queue->maxpriority = nr_queues - 1;
        rpc_reset_waitqueue_priority(queue);
        queue->qlen = 0;
        setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
        INIT_LIST_HEAD(&queue->timer_list.list);
        rpc_assign_waitqueue_name(queue, qname);
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
        __rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);

void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
        del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

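/*
 * Typical wait queue lifecycle, as a minimal sketch (the queue and
 * its name below are hypothetical, not from this file):
 *
 *        struct rpc_wait_queue my_queue;
 *
 *        rpc_init_wait_queue(&my_queue, "myq");
 *        ...
 *        rpc_sleep_on(&my_queue, task, NULL);
 *        rpc_wake_up(&my_queue);
 *        ...
 *        rpc_destroy_wait_queue(&my_queue);  // quiesces the queue timer
 *
 * delay_queue above is initialised exactly this way in
 * rpc_init_mempool() at the bottom of this file.
 */
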
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
        freezable_schedule_unsafe();
        if (signal_pending_state(mode, current))
                return -ERESTARTSYS;
        return 0;
}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
        static atomic_t rpc_pid;

        task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

static void rpc_set_active(struct rpc_task *task)
{
        trace_rpc_task_begin(task->tk_client, task, NULL);

        rpc_task_set_debuginfo(task);
        set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}

/*
 * Mark an RPC call as having completed by clearing the 'active' bit
 * and then waking up all tasks that were sleeping.
 */
static int rpc_complete_task(struct rpc_task *task)
{
        void *m = &task->tk_runstate;
        wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
        struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
        unsigned long flags;
        int ret;

        trace_rpc_task_complete(task->tk_client, task, NULL);

        spin_lock_irqsave(&wq->lock, flags);
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        ret = atomic_dec_and_test(&task->tk_count);
        if (waitqueue_active(wq))
                __wake_up_locked_key(wq, TASK_NORMAL, &k);
        spin_unlock_irqrestore(&wq->lock, flags);
        return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
        if (action == NULL)
                action = rpc_wait_bit_killable;
        return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                        action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);

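/*
 * A minimal completion-wait sketch, assuming the caller already holds
 * an extra reference on the task (illustrative only; most callers go
 * through the rpc_wait_for_completion_task() wrapper, which passes a
 * NULL @action):
 *
 *        rpc_execute(task);
 *        if (__rpc_wait_for_completion_task(task, NULL) < 0)
 *                // sleep was interrupted by a fatal signal
 *        rpc_put_task(task);
 *
 * Passing NULL for @action selects rpc_wait_bit_killable above, so
 * the sleep is freezable and interruptible by fatal signals only.
 */
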
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
 */
static void rpc_make_runnable(struct workqueue_struct *wq,
                struct rpc_task *task)
{
        bool need_wakeup = !rpc_test_and_set_running(task);

        rpc_clear_queued(task);
        if (!need_wakeup)
                return;
        if (RPC_IS_ASYNC(task)) {
                INIT_WORK(&task->u.tk_work, rpc_async_schedule);
                queue_work(wq, &task->u.tk_work);
        } else
                wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
                struct rpc_task *task,
                rpc_action action,
                unsigned char queue_priority)
{
        dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
                        task->tk_pid, rpc_qname(q), jiffies);

        trace_rpc_task_sleep(task->tk_client, task, q);

        __rpc_add_wait_queue(q, task, queue_priority);

        WARN_ON_ONCE(task->tk_callback != NULL);
        task->tk_callback = action;
        __rpc_add_timer(q, task);
}

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                rpc_action action)
{
        /* We shouldn't ever put an inactive task to sleep */
        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
        if (!RPC_IS_ACTIVATED(task)) {
                task->tk_status = -EIO;
                rpc_put_task_async(task);
                return;
        }

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, action, task->tk_priority);
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);

void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
                rpc_action action, int priority)
{
        /* We shouldn't ever put an inactive task to sleep */
        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
        if (!RPC_IS_ACTIVATED(task)) {
                task->tk_status = -EIO;
                rpc_put_task_async(task);
                return;
        }

        /*
         * Protect the queue operations.
         */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);

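/*
 * Sleep/wake pairing, as a hedged sketch (the queue and callback
 * names are illustrative): a state-machine step typically sets a
 * timeout and sleeps, and some other context later wakes the task:
 *
 *        // in a tk_action callback:
 *        task->tk_timeout = 5 * HZ;
 *        rpc_sleep_on(&xprt->pending, task, my_timer_cb);
 *
 *        // elsewhere, when the awaited event occurs:
 *        rpc_wake_up_queued_task(&xprt->pending, task);
 *
 * If the timeout fires first, __rpc_queue_timer_fn() wakes the task
 * with tk_status == -ETIMEDOUT instead.
 */
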
/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue,
                struct rpc_task *task)
{
        dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
                        task->tk_pid, jiffies);

        /* Has the task been executed yet? If not, we cannot wake it up! */
        if (!RPC_IS_ACTIVATED(task)) {
                printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
                return;
        }

        trace_rpc_task_wakeup(task->tk_client, task, queue);

        __rpc_remove_wait_queue(queue, task);

        rpc_make_runnable(wq, task);

        dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue, struct rpc_task *task)
{
        if (RPC_IS_QUEUED(task)) {
                smp_rmb();
                if (task->tk_waitqueue == queue)
                        __rpc_do_wake_up_task_on_wq(wq, queue, task);
        }
}

/*
 * Wake up a queued task while the queue lock is being held
 */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

/*
 * Wake up a task on a specific queue
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
        spin_lock_bh(&queue->lock);
        rpc_wake_up_task_queue_locked(queue, task);
        spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
        struct list_head *q;
        struct rpc_task *task;

        /*
         * Service a batch of tasks from a single owner.
         */
        q = &queue->tasks[queue->priority];
        if (!list_empty(q)) {
                task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                if (queue->owner == task->tk_owner) {
                        if (--queue->nr)
                                goto out;
                        list_move_tail(&task->u.tk_wait.list, q);
                }
                /*
                 * Check if we need to switch queues.
                 */
                goto new_owner;
        }

        /*
         * Service the next queue.
         */
        do {
                if (q == &queue->tasks[0])
                        q = &queue->tasks[queue->maxpriority];
                else
                        q = q - 1;
                if (!list_empty(q)) {
                        task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
                        goto new_queue;
                }
        } while (q != &queue->tasks[queue->priority]);

        rpc_reset_waitqueue_priority(queue);
        return NULL;

new_queue:
        rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
        rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
        return task;
}

static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
        if (RPC_IS_PRIORITY(queue))
                return __rpc_find_next_queued_priority(queue);
        if (!list_empty(&queue->tasks[0]))
                return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
        return NULL;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
                struct rpc_wait_queue *queue,
                bool (*func)(struct rpc_task *, void *), void *data)
{
        struct rpc_task *task = NULL;

        dprintk("RPC: wake_up_first(%p \"%s\")\n",
                        queue, rpc_qname(queue));
        spin_lock_bh(&queue->lock);
        task = __rpc_find_next_queued(queue);
        if (task != NULL) {
                if (func(task, data))
                        rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
                else
                        task = NULL;
        }
        spin_unlock_bh(&queue->lock);

        return task;
}

/*
 * Wake up the first task on the wait queue.
 */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
                bool (*func)(struct rpc_task *, void *), void *data)
{
        return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
        return true;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
        return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);

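/*
 * rpc_wake_up_first() doubles as a conditional dequeue: the filter
 * callback decides, under queue->lock, whether the candidate task may
 * run.  Hedged sketch with a hypothetical predicate (rpc_wake_up_next
 * above is just this pattern with an always-true filter):
 *
 *        static bool my_can_run(struct rpc_task *task, void *data)
 *        {
 *                struct my_limits *lim = data;   // hypothetical
 *                return lim->in_flight < lim->max_in_flight;
 *        }
 *
 *        task = rpc_wake_up_first(&some_queue, my_can_run, &lim);
 *        // NULL means the queue was empty or the filter said no
 */
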
/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                while (!list_empty(head)) {
                        struct rpc_task *task;
                        task = list_first_entry(head,
                                        struct rpc_task,
                                        u.tk_wait.list);
                        rpc_wake_up_task_queue_locked(queue, task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
        struct list_head *head;

        spin_lock_bh(&queue->lock);
        head = &queue->tasks[queue->maxpriority];
        for (;;) {
                while (!list_empty(head)) {
                        struct rpc_task *task;
                        task = list_first_entry(head,
                                        struct rpc_task,
                                        u.tk_wait.list);
                        task->tk_status = status;
                        rpc_wake_up_task_queue_locked(queue, task);
                }
                if (head == &queue->tasks[0])
                        break;
                head--;
        }
        spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);

static void __rpc_queue_timer_fn(unsigned long ptr)
{
        struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
        struct rpc_task *task, *n;
        unsigned long expires, now, timeo;

        spin_lock(&queue->lock);
        expires = now = jiffies;
        list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
                timeo = task->u.tk_wait.expires;
                if (time_after_eq(now, timeo)) {
                        dprintk("RPC: %5u timeout\n", task->tk_pid);
                        task->tk_status = -ETIMEDOUT;
                        rpc_wake_up_task_queue_locked(queue, task);
                        continue;
                }
                if (expires == now || time_after(expires, timeo))
                        expires = timeo;
        }
        if (!list_empty(&queue->timer_list.list))
                rpc_set_queue_timer(queue, expires);
        spin_unlock(&queue->lock);
}

static void __rpc_atrun(struct rpc_task *task)
{
        if (task->tk_status == -ETIMEDOUT)
                task->tk_status = 0;
}

/*
 * Run a task at a later time
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
        task->tk_timeout = delay;
        rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);

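/*
 * rpc_delay() is how a state-machine step backs off and retries: the
 * task sleeps on delay_queue, and when the queue timer fires,
 * __rpc_atrun clears the -ETIMEDOUT status so the expired delay is
 * not mistaken for an error.  Hedged sketch (the retry action is
 * hypothetical):
 *
 *        // inside some tk_action callback, on a transient failure:
 *        task->tk_action = call_retry;       // hypothetical next step
 *        rpc_delay(task, 3 * HZ);            // re-run it in ~3 seconds
 *        return;
 */
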
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
void rpc_prepare_task(struct rpc_task *task)
{
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

static void
rpc_init_task_statistics(struct rpc_task *task)
{
        /* Initialize retry counters */
        task->tk_garb_retry = 2;
        task->tk_cred_retry = 2;
        task->tk_rebind_retry = 2;

        /* starting timestamp */
        task->tk_start = ktime_get();
}

static void
rpc_reset_task_statistics(struct rpc_task *task)
{
        task->tk_timeouts = 0;
        task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

        rpc_init_task_statistics(task);
}

/*
 * Helper that calls task->tk_ops->rpc_call_done if it exists
 */
void rpc_exit_task(struct rpc_task *task)
{
        task->tk_action = NULL;
        if (task->tk_ops->rpc_call_done != NULL) {
                task->tk_ops->rpc_call_done(task, task->tk_calldata);
                if (task->tk_action != NULL) {
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
                        rpc_reset_task_statistics(task);
                }
        }
}

void rpc_exit(struct rpc_task *task, int status)
{
        task->tk_status = status;
        task->tk_action = rpc_exit_task;
        if (RPC_IS_QUEUED(task))
                rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);

void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
        if (ops->rpc_release != NULL)
                ops->rpc_release(calldata);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static void __rpc_execute(struct rpc_task *task)
{
        struct rpc_wait_queue *queue;
        int task_is_async = RPC_IS_ASYNC(task);
        int status = 0;

        dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
                        task->tk_pid, task->tk_flags);

        WARN_ON_ONCE(RPC_IS_QUEUED(task));
        if (RPC_IS_QUEUED(task))
                return;

        for (;;) {
                void (*do_action)(struct rpc_task *);

                /*
                 * Execute any pending callback first.
                 */
                do_action = task->tk_callback;
                task->tk_callback = NULL;
                if (do_action == NULL) {
                        /*
                         * Perform the next FSM step.
                         * tk_action may be NULL if the task has been killed.
                         * In particular, note that rpc_killall_tasks may
                         * do this at any time, so beware when dereferencing.
                         */
                        do_action = task->tk_action;
                        if (do_action == NULL)
                                break;
                }
                trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
                do_action(task);

                /*
                 * Lockless check for whether task is sleeping or not.
                 */
                if (!RPC_IS_QUEUED(task))
                        continue;
                /*
                 * The queue->lock protects against races with
                 * rpc_make_runnable().
                 *
                 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
                 * rpc_task, rpc_make_runnable() can assign it to a
                 * different workqueue. We therefore cannot assume that the
                 * rpc_task pointer may still be dereferenced.
                 */
                queue = task->tk_waitqueue;
                spin_lock_bh(&queue->lock);
                if (!RPC_IS_QUEUED(task)) {
                        spin_unlock_bh(&queue->lock);
                        continue;
                }
                rpc_clear_running(task);
                spin_unlock_bh(&queue->lock);
                if (task_is_async)
                        return;

                /* sync task: sleep here */
                dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
                status = out_of_line_wait_on_bit(&task->tk_runstate,
                                RPC_TASK_QUEUED, rpc_wait_bit_killable,
                                TASK_KILLABLE);
                if (status == -ERESTARTSYS) {
                        /*
                         * When a sync task receives a signal, it exits with
                         * -ERESTARTSYS. In order to catch any callbacks that
                         * clean up after sleeping on some queue, we don't
                         * break the loop here, but go around once more.
                         */
                        dprintk("RPC: %5u got signal\n", task->tk_pid);
                        task->tk_flags |= RPC_TASK_KILLED;
                        rpc_exit(task, -ERESTARTSYS);
                }
                dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
        }

        dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
                        task->tk_status);
        /* Release all resources associated with the task */
        rpc_release_task(task);
}

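/*
 * Shape of the state machine driven by the loop above, as a sketch:
 * each tk_action callback performs one step and then either sets the
 * next tk_action, puts the task to sleep on a wait queue, or calls
 * rpc_exit().  A hypothetical two-step task might look like:
 *
 *        static void my_step2(struct rpc_task *task)
 *        {
 *                task->tk_action = NULL;      // done; loop exits
 *        }
 *
 *        static void my_step1(struct rpc_task *task)
 *        {
 *                task->tk_action = my_step2;  // run next time around
 *        }
 *
 * The real RPC call path in net/sunrpc/clnt.c chains its call_*()
 * steps (call_start, call_reserve, call_transmit, ...) the same way.
 */
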
/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 * released. In particular note that tk_release() will have
 * been called, so your task memory may have been freed.
 */
void rpc_execute(struct rpc_task *task)
{
        bool is_async = RPC_IS_ASYNC(task);

        rpc_set_active(task);
        rpc_make_runnable(rpciod_workqueue, task);
        if (!is_async)
                __rpc_execute(task);
}

static void rpc_async_schedule(struct work_struct *work)
{
        __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}

/**
 * rpc_malloc - allocate an RPC buffer
 * @task: RPC task that will use this buffer
 * @size: requested byte size
 *
 * To prevent rpciod from hanging, this allocator never sleeps,
 * returning NULL and suppressing the warning if the request cannot
 * be serviced immediately.
 * The caller can arrange to sleep in a way that is safe for rpciod.
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we avoid using GFP_KERNEL.
 */
void *rpc_malloc(struct rpc_task *task, size_t size)
{
        struct rpc_buffer *buf;
        gfp_t gfp = GFP_NOIO | __GFP_NOWARN;

        if (RPC_IS_SWAPPER(task))
                gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;

        size += sizeof(struct rpc_buffer);
        if (size <= RPC_BUFFER_MAXSIZE)
                buf = mempool_alloc(rpc_buffer_mempool, gfp);
        else
                buf = kmalloc(size, gfp);

        if (!buf)
                return NULL;

        buf->len = size;
        dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
                        task->tk_pid, size, buf);
        return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);

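/*
 * Buffer layout note: rpc_malloc() returns a pointer to the data area
 * of a struct rpc_buffer, with the length header hidden in front of
 * it.  rpc_free() below recovers the header with container_of() to
 * decide between mempool_free() and kfree().  Roughly:
 *
 *        [ buf->len | buf->data ... ]
 *                     ^-- pointer handed to the caller
 *
 * So the two must always be used as a pair; handing an rpc_malloc()
 * pointer to plain kfree() would be off by the header size.
 */
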
/**
 * rpc_free - free buffer allocated via rpc_malloc
 * @buffer: buffer to free
 *
 */
void rpc_free(void *buffer)
{
        size_t size;
        struct rpc_buffer *buf;

        if (!buffer)
                return;

        buf = container_of(buffer, struct rpc_buffer, data);
        size = buf->len;

        dprintk("RPC: freeing buffer of size %zu at %p\n",
                        size, buf);

        if (size <= RPC_BUFFER_MAXSIZE)
                mempool_free(buf, rpc_buffer_mempool);
        else
                kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);

/*
 * Creation and deletion of RPC task structures
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
        memset(task, 0, sizeof(*task));
        atomic_set(&task->tk_count, 1);
        task->tk_flags = task_setup_data->flags;
        task->tk_ops = task_setup_data->callback_ops;
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);

        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;

        /* Initialize workqueue for async tasks */
        task->tk_workqueue = task_setup_data->workqueue;

        task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;

        rpc_init_task_statistics(task);

        dprintk("RPC: new task initialized, procpid %u\n",
                        task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
        return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}

/*
 * Create a new task for the specified client.
 */
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
        struct rpc_task *task = setup_data->task;
        unsigned short flags = 0;

        if (task == NULL) {
                task = rpc_alloc_task();
                if (task == NULL) {
                        rpc_release_calldata(setup_data->callback_ops,
                                        setup_data->callback_data);
                        return ERR_PTR(-ENOMEM);
                }
                flags = RPC_TASK_DYNAMIC;
        }

        rpc_init_task(task, setup_data);
        task->tk_flags |= flags;
        dprintk("RPC: allocated task %p\n", task);
        return task;
}

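/*
 * Callers normally reach rpc_new_task() through rpc_run_task() with a
 * filled-in struct rpc_task_setup.  A minimal hedged sketch (the ops,
 * message, and error handling are illustrative, not from this file):
 *
 *        struct rpc_task_setup setup = {
 *                .rpc_client   = clnt,
 *                .rpc_message  = &msg,
 *                .callback_ops = &my_call_ops,  // rpc_call_done etc.
 *                .flags        = RPC_TASK_ASYNC,
 *        };
 *
 *        task = rpc_run_task(&setup);
 *        if (IS_ERR(task))
 *                return PTR_ERR(task);
 *        rpc_put_task(task);
 *
 * Note that rpc_new_task() consumes the callback data on allocation
 * failure (via rpc_release_calldata), so callers must not release it
 * a second time on the error path.
 */
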
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 */
static void rpc_free_task(struct rpc_task *task)
{
        unsigned short tk_flags = task->tk_flags;

        rpc_release_calldata(task->tk_ops, task->tk_calldata);

        if (tk_flags & RPC_TASK_DYNAMIC) {
                dprintk("RPC: %5u freeing task\n", task->tk_pid);
                mempool_free(task, rpc_task_mempool);
        }
}

static void rpc_async_release(struct work_struct *work)
{
        rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}

static void rpc_release_resources_task(struct rpc_task *task)
{
        xprt_release(task);
        if (task->tk_msg.rpc_cred) {
                put_rpccred(task->tk_msg.rpc_cred);
                task->tk_msg.rpc_cred = NULL;
        }
        rpc_task_release_client(task);
}

static void rpc_final_put_task(struct rpc_task *task,
                struct workqueue_struct *q)
{
        if (q != NULL) {
                INIT_WORK(&task->u.tk_work, rpc_async_release);
                queue_work(q, &task->u.tk_work);
        } else
                rpc_free_task(task);
}

static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
        if (atomic_dec_and_test(&task->tk_count)) {
                rpc_release_resources_task(task);
                rpc_final_put_task(task, q);
        }
}

void rpc_put_task(struct rpc_task *task)
{
        rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);

void rpc_put_task_async(struct rpc_task *task)
{
        rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

static void rpc_release_task(struct rpc_task *task)
{
        dprintk("RPC: %5u release task\n", task->tk_pid);

        WARN_ON_ONCE(RPC_IS_QUEUED(task));

        rpc_release_resources_task(task);

        /*
         * Note: at this point we have been removed from rpc_clnt->cl_tasks,
         * so it should be safe to use task->tk_count as a test for whether
         * or not any other processes still hold references to our rpc_task.
         */
        if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
                /* Wake up anyone who may be waiting for task completion */
                if (!rpc_complete_task(task))
                        return;
        } else {
                if (!atomic_dec_and_test(&task->tk_count))
                        return;
        }
        rpc_final_put_task(task, task->tk_workqueue);
}

int rpciod_up(void)
{
        return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}

void rpciod_down(void)
{
        module_put(THIS_MODULE);
}

/*
 * Start up the rpciod workqueue.
 */
static int rpciod_start(void)
{
        struct workqueue_struct *wq;

        /*
         * Create the rpciod thread and wait for it to start.
         */
        dprintk("RPC: creating workqueue rpciod\n");
        wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
        if (!wq)
                goto out_failed;
        rpciod_workqueue = wq;
        /* Note: highpri because network receive is latency sensitive */
        wq = alloc_workqueue("xprtiod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!wq)
                goto free_rpciod;
        xprtiod_workqueue = wq;
        return 1;
free_rpciod:
        wq = rpciod_workqueue;
        rpciod_workqueue = NULL;
        destroy_workqueue(wq);
out_failed:
        return 0;
}

static void rpciod_stop(void)
{
        struct workqueue_struct *wq = NULL;

        if (rpciod_workqueue == NULL)
                return;
        dprintk("RPC: destroying workqueue rpciod\n");

        wq = rpciod_workqueue;
        rpciod_workqueue = NULL;
        destroy_workqueue(wq);
        wq = xprtiod_workqueue;
        xprtiod_workqueue = NULL;
        destroy_workqueue(wq);
}

void
rpc_destroy_mempool(void)
{
        rpciod_stop();
        mempool_destroy(rpc_buffer_mempool);
        mempool_destroy(rpc_task_mempool);
        kmem_cache_destroy(rpc_task_slabp);
        kmem_cache_destroy(rpc_buffer_slabp);
        rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
        /*
         * The following is not strictly a mempool initialisation,
         * but there is no harm in doing it here
         */
        rpc_init_wait_queue(&delay_queue, "delayq");
        if (!rpciod_start())
                goto err_nomem;

        rpc_task_slabp = kmem_cache_create("rpc_tasks",
                                           sizeof(struct rpc_task),
                                           0, SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!rpc_task_slabp)
                goto err_nomem;
        rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
                                             RPC_BUFFER_MAXSIZE,
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (!rpc_buffer_slabp)
                goto err_nomem;
        rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
                                                    rpc_task_slabp);
        if (!rpc_task_mempool)
                goto err_nomem;
        rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
                                                      rpc_buffer_slabp);
        if (!rpc_buffer_mempool)
                goto err_nomem;
        return 0;
err_nomem:
        rpc_destroy_mempool();
        return -ENOMEM;
}