Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/net/sunrpc/sched.c | |
3 | * | |
4 | * Scheduling for synchronous and asynchronous RPC requests. | |
5 | * | |
6 | * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de> | |
7 | * | |
8 | * TCP NFS related read + write fixes | |
9 | * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie> | |
10 | */ | |
11 | ||
12 | #include <linux/module.h> | |
13 | ||
14 | #include <linux/sched.h> | |
15 | #include <linux/interrupt.h> | |
16 | #include <linux/slab.h> | |
17 | #include <linux/mempool.h> | |
18 | #include <linux/smp.h> | |
19 | #include <linux/smp_lock.h> | |
20 | #include <linux/spinlock.h> | |
4a3e2f71 | 21 | #include <linux/mutex.h> |
1da177e4 LT |
22 | |
23 | #include <linux/sunrpc/clnt.h> | |
1da177e4 LT |
24 | |
25 | #ifdef RPC_DEBUG | |
26 | #define RPCDBG_FACILITY RPCDBG_SCHED | |
27 | #define RPC_TASK_MAGIC_ID 0xf00baa | |
28 | static int rpc_task_id; | |
29 | #endif | |
30 | ||
31 | /* | |
32 | * RPC slabs and memory pools | |
33 | */ | |
34 | #define RPC_BUFFER_MAXSIZE (2048) | |
35 | #define RPC_BUFFER_POOLSIZE (8) | |
36 | #define RPC_TASK_POOLSIZE (8) | |
ba89966c ED |
37 | static kmem_cache_t *rpc_task_slabp __read_mostly; |
38 | static kmem_cache_t *rpc_buffer_slabp __read_mostly; | |
39 | static mempool_t *rpc_task_mempool __read_mostly; | |
40 | static mempool_t *rpc_buffer_mempool __read_mostly; | |
1da177e4 LT |
41 | |
42 | static void __rpc_default_timer(struct rpc_task *task); | |
43 | static void rpciod_killall(void); | |
1da177e4 LT |
44 | static void rpc_async_schedule(void *); |
45 | ||
1da177e4 LT |
46 | /* |
47 | * RPC tasks sit here while waiting for conditions to improve. | |
48 | */ | |
49 | static RPC_WAITQ(delay_queue, "delayq"); | |
50 | ||
51 | /* | |
52 | * All RPC tasks are linked into this list | |
53 | */ | |
54 | static LIST_HEAD(all_tasks); | |
55 | ||
56 | /* | |
57 | * rpciod-related stuff | |
58 | */ | |
4a3e2f71 | 59 | static DEFINE_MUTEX(rpciod_mutex); |
1da177e4 | 60 | static unsigned int rpciod_users; |
24c5d9d7 | 61 | struct workqueue_struct *rpciod_workqueue; |
1da177e4 LT |
62 | |
63 | /* | |
64 | * Spinlock for other critical sections of code. | |
65 | */ | |
66 | static DEFINE_SPINLOCK(rpc_sched_lock); | |
67 | ||
68 | /* | |
69 | * Disable the timer for a given RPC task. Should be called with | |
70 | * queue->lock held and bottom halves disabled, in order to avoid | |
71 | * races within rpc_run_timer(). | |
72 | */ | |
73 | static inline void | |
74 | __rpc_disable_timer(struct rpc_task *task) | |
75 | { | |
76 | dprintk("RPC: %4d disabling timer\n", task->tk_pid); | |
77 | task->tk_timeout_fn = NULL; | |
78 | task->tk_timeout = 0; | |
79 | } | |
80 | ||
81 | /* | |
82 | * Run a timeout function. | |
83 | * We use the callback in order to allow __rpc_wake_up_task() | |
84 | * and friends to disable the timer synchronously on SMP systems | |
85 | * without calling del_timer_sync(). The latter could cause a | |
86 | * deadlock if called while we're holding spinlocks... | |
87 | */ | |
88 | static void rpc_run_timer(struct rpc_task *task) | |
89 | { | |
90 | void (*callback)(struct rpc_task *); | |
91 | ||
92 | callback = task->tk_timeout_fn; | |
93 | task->tk_timeout_fn = NULL; | |
94 | if (callback && RPC_IS_QUEUED(task)) { | |
95 | dprintk("RPC: %4d running timer\n", task->tk_pid); | |
96 | callback(task); | |
97 | } | |
98 | smp_mb__before_clear_bit(); | |
99 | clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate); | |
100 | smp_mb__after_clear_bit(); | |
101 | } | |
102 | ||
103 | /* | |
104 | * Set up a timer for the current task. | |
105 | */ | |
106 | static inline void | |
107 | __rpc_add_timer(struct rpc_task *task, rpc_action timer) | |
108 | { | |
109 | if (!task->tk_timeout) | |
110 | return; | |
111 | ||
112 | dprintk("RPC: %4d setting alarm for %lu ms\n", | |
113 | task->tk_pid, task->tk_timeout * 1000 / HZ); | |
114 | ||
115 | if (timer) | |
116 | task->tk_timeout_fn = timer; | |
117 | else | |
118 | task->tk_timeout_fn = __rpc_default_timer; | |
119 | set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate); | |
120 | mod_timer(&task->tk_timer, jiffies + task->tk_timeout); | |
121 | } | |
122 | ||
123 | /* | |
124 | * Delete any timer for the current task. Because we use del_timer_sync(), | |
125 | * this function should never be called while holding queue->lock. | |
126 | */ | |
127 | static void | |
128 | rpc_delete_timer(struct rpc_task *task) | |
129 | { | |
130 | if (RPC_IS_QUEUED(task)) | |
131 | return; | |
132 | if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) { | |
133 | del_singleshot_timer_sync(&task->tk_timer); | |
134 | dprintk("RPC: %4d deleting timer\n", task->tk_pid); | |
135 | } | |
136 | } | |
137 | ||
138 | /* | |
139 | * Add new request to a priority queue. | |
140 | */ | |
141 | static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task) | |
142 | { | |
143 | struct list_head *q; | |
144 | struct rpc_task *t; | |
145 | ||
146 | INIT_LIST_HEAD(&task->u.tk_wait.links); | |
147 | q = &queue->tasks[task->tk_priority]; | |
148 | if (unlikely(task->tk_priority > queue->maxpriority)) | |
149 | q = &queue->tasks[queue->maxpriority]; | |
150 | list_for_each_entry(t, q, u.tk_wait.list) { | |
151 | if (t->tk_cookie == task->tk_cookie) { | |
152 | list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links); | |
153 | return; | |
154 | } | |
155 | } | |
156 | list_add_tail(&task->u.tk_wait.list, q); | |
157 | } | |
158 | ||
159 | /* | |
160 | * Add new request to wait queue. | |
161 | * | |
162 | * Swapper tasks always get inserted at the head of the queue. | |
163 | * This should avoid many nasty memory deadlocks and hopefully | |
164 | * improve overall performance. | |
165 | * Everyone else gets appended to the queue to ensure proper FIFO behavior. | |
166 | */ | |
167 | static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) | |
168 | { | |
169 | BUG_ON(RPC_IS_QUEUED(task)); | |
170 | ||
171 | if (RPC_IS_PRIORITY(queue)) | |
172 | __rpc_add_wait_queue_priority(queue, task); | |
173 | else if (RPC_IS_SWAPPER(task)) | |
174 | list_add(&task->u.tk_wait.list, &queue->tasks[0]); | |
175 | else | |
176 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); | |
177 | task->u.tk_wait.rpc_waitq = queue; | |
e19b63da | 178 | queue->qlen++; |
1da177e4 LT |
179 | rpc_set_queued(task); |
180 | ||
181 | dprintk("RPC: %4d added to queue %p \"%s\"\n", | |
182 | task->tk_pid, queue, rpc_qname(queue)); | |
183 | } | |
184 | ||
185 | /* | |
186 | * Remove request from a priority queue. | |
187 | */ | |
188 | static void __rpc_remove_wait_queue_priority(struct rpc_task *task) | |
189 | { | |
190 | struct rpc_task *t; | |
191 | ||
192 | if (!list_empty(&task->u.tk_wait.links)) { | |
193 | t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list); | |
194 | list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); | |
195 | list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); | |
196 | } | |
197 | list_del(&task->u.tk_wait.list); | |
198 | } | |
199 | ||
200 | /* | |
201 | * Remove request from queue. | |
202 | * Note: must be called with queue->lock held. | |
203 | */ | |
204 | static void __rpc_remove_wait_queue(struct rpc_task *task) | |
205 | { | |
206 | struct rpc_wait_queue *queue; | |
207 | queue = task->u.tk_wait.rpc_waitq; | |
208 | ||
209 | if (RPC_IS_PRIORITY(queue)) | |
210 | __rpc_remove_wait_queue_priority(task); | |
211 | else | |
212 | list_del(&task->u.tk_wait.list); | |
e19b63da | 213 | queue->qlen--; |
1da177e4 LT |
214 | dprintk("RPC: %4d removed from queue %p \"%s\"\n", |
215 | task->tk_pid, queue, rpc_qname(queue)); | |
216 | } | |
217 | ||
218 | static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) | |
219 | { | |
220 | queue->priority = priority; | |
221 | queue->count = 1 << (priority * 2); | |
222 | } | |
223 | ||
224 | static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie) | |
225 | { | |
226 | queue->cookie = cookie; | |
227 | queue->nr = RPC_BATCH_COUNT; | |
228 | } | |
229 | ||
230 | static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) | |
231 | { | |
232 | rpc_set_waitqueue_priority(queue, queue->maxpriority); | |
233 | rpc_set_waitqueue_cookie(queue, 0); | |
234 | } | |
235 | ||
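/*
 * A worked example of the batching math: queue->count = 1 << (priority * 2)
 * grants higher levels an exponentially larger quota of cookie batches
 * before __rpc_wake_up_next_priority() moves on to another level:
 *
 *	priority 0 -> 1 << 0 = 1 batch
 *	priority 1 -> 1 << 2 = 4 batches
 *	priority 2 -> 1 << 4 = 16 batches
 *
 * where each batch services up to RPC_BATCH_COUNT tasks sharing a
 * single cookie (queue->nr above).
 */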
236 | static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio) | |
237 | { | |
238 | int i; | |
239 | ||
240 | spin_lock_init(&queue->lock); | |
241 | for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) | |
242 | INIT_LIST_HEAD(&queue->tasks[i]); | |
243 | queue->maxpriority = maxprio; | |
244 | rpc_reset_waitqueue_priority(queue); | |
245 | #ifdef RPC_DEBUG | |
246 | queue->name = qname; | |
247 | #endif | |
248 | } | |
249 | ||
250 | void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) | |
251 | { | |
252 | __rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH); | |
253 | } | |
254 | ||
255 | void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) | |
256 | { | |
257 | __rpc_init_priority_wait_queue(queue, qname, 0); | |
258 | } | |
259 | EXPORT_SYMBOL(rpc_init_wait_queue); | |
260 | ||
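/*
 * Illustrative sketch (names here are hypothetical): a transport or
 * client would typically embed and initialize its own wait queues
 * along these lines.
 */
#if 0
struct demo_xprt {
	struct rpc_wait_queue	pending;	/* tasks awaiting a reply */
	struct rpc_wait_queue	backlog;	/* tasks awaiting a request slot */
};

static void demo_init_queues(struct demo_xprt *demo)
{
	rpc_init_wait_queue(&demo->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&demo->backlog, "xprt_backlog");
}
#endif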
44c28873 TM |
261 | static int rpc_wait_bit_interruptible(void *word) |
262 | { | |
263 | if (signal_pending(current)) | |
264 | return -ERESTARTSYS; | |
265 | schedule(); | |
266 | return 0; | |
267 | } | |
268 | ||
e6b3c4db TM |
269 | static void rpc_set_active(struct rpc_task *task) |
270 | { | |
271 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) | |
272 | return; | |
273 | spin_lock(&rpc_sched_lock); | |
274 | #ifdef RPC_DEBUG | |
275 | task->tk_magic = RPC_TASK_MAGIC_ID; | |
276 | task->tk_pid = rpc_task_id++; | |
277 | #endif | |
278 | /* Add to global list of all tasks */ | |
279 | list_add_tail(&task->tk_task, &all_tasks); | |
280 | spin_unlock(&rpc_sched_lock); | |
281 | } | |
282 | ||
44c28873 TM |
283 | /* |
284 | * Mark an RPC call as having completed by clearing the 'active' bit | |
285 | */ | |
e6b3c4db | 286 | static void rpc_mark_complete_task(struct rpc_task *task) |
44c28873 | 287 | { |
e6b3c4db TM |
288 | smp_mb__before_clear_bit(); |
289 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | |
290 | smp_mb__after_clear_bit(); | |
44c28873 TM |
291 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); |
292 | } | |
293 | ||
294 | /* | |
295 | * Allow callers to wait for completion of an RPC call | |
296 | */ | |
297 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | |
298 | { | |
299 | if (action == NULL) | |
300 | action = rpc_wait_bit_interruptible; | |
301 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | |
302 | action, TASK_INTERRUPTIBLE); | |
303 | } | |
304 | EXPORT_SYMBOL(__rpc_wait_for_completion_task); | |
305 | ||
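/*
 * Usage sketch (an assumption about typical callers): pass a NULL
 * action so the interruptible default above is used, and treat
 * -ERESTARTSYS as "a signal arrived before the task completed".
 */
#if 0
	status = __rpc_wait_for_completion_task(task, NULL);
	if (status == -ERESTARTSYS)
		goto out_interrupted;	/* caller-defined signal policy */
#endif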
1da177e4 LT |
306 | /* |
307 | * Make an RPC task runnable. | |
308 | * | |
309 | * Note: If the task is ASYNC, this must be called with | |
310 | * the spinlock held to protect the wait queue operation. | |
311 | */ | |
312 | static void rpc_make_runnable(struct rpc_task *task) | |
313 | { | |
1da177e4 | 314 | BUG_ON(task->tk_timeout_fn); |
1da177e4 | 315 | rpc_clear_queued(task); |
cc4dc59e CS |
316 | if (rpc_test_and_set_running(task)) |
317 | return; | |
318 | /* We might have raced */ | |
319 | if (RPC_IS_QUEUED(task)) { | |
320 | rpc_clear_running(task); | |
1da177e4 | 321 | return; |
cc4dc59e | 322 | } |
1da177e4 LT |
323 | if (RPC_IS_ASYNC(task)) { |
324 | int status; | |
325 | ||
326 | INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); | |
327 | status = queue_work(task->tk_workqueue, &task->u.tk_work); | |
328 | if (status < 0) { | |
329 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); | |
330 | task->tk_status = status; | |
331 | return; | |
332 | } | |
333 | } else | |
96651ab3 | 334 | wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED); |
1da177e4 LT |
335 | } |
336 | ||
1da177e4 LT |
337 | /* |
338 | * Prepare for sleeping on a wait queue. | |
339 | * By always appending tasks to the list we ensure FIFO behavior. | |
340 | * NB: An RPC task will only receive interrupt-driven events as long | |
341 | * as it's on a wait queue. | |
342 | */ | |
343 | static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |
344 | rpc_action action, rpc_action timer) | |
345 | { | |
346 | dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid, | |
347 | rpc_qname(q), jiffies); | |
348 | ||
349 | if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) { | |
350 | printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n"); | |
351 | return; | |
352 | } | |
353 | ||
1da177e4 LT |
354 | __rpc_add_wait_queue(q, task); |
355 | ||
356 | BUG_ON(task->tk_callback != NULL); | |
357 | task->tk_callback = action; | |
358 | __rpc_add_timer(task, timer); | |
359 | } | |
360 | ||
361 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |
362 | rpc_action action, rpc_action timer) | |
363 | { | |
e6b3c4db TM |
364 | /* Mark the task as being activated if so needed */ |
365 | rpc_set_active(task); | |
366 | ||
1da177e4 LT |
367 | /* |
368 | * Protect the queue operations. | |
369 | */ | |
370 | spin_lock_bh(&q->lock); | |
371 | __rpc_sleep_on(q, task, action, timer); | |
372 | spin_unlock_bh(&q->lock); | |
373 | } | |
374 | ||
375 | /** | |
376 | * __rpc_do_wake_up_task - wake up a single rpc_task | |
377 | * @task: task to be woken up | |
378 | * | |
379 | * Caller must hold queue->lock, and have cleared the task queued flag. | |
380 | */ | |
381 | static void __rpc_do_wake_up_task(struct rpc_task *task) | |
382 | { | |
383 | dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies); | |
384 | ||
385 | #ifdef RPC_DEBUG | |
386 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | |
387 | #endif | |
388 | /* Has the task been executed yet? If not, we cannot wake it up! */ | |
389 | if (!RPC_IS_ACTIVATED(task)) { | |
390 | printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task); | |
391 | return; | |
392 | } | |
393 | ||
394 | __rpc_disable_timer(task); | |
395 | __rpc_remove_wait_queue(task); | |
396 | ||
397 | rpc_make_runnable(task); | |
398 | ||
399 | dprintk("RPC: __rpc_wake_up_task done\n"); | |
400 | } | |
401 | ||
402 | /* | |
403 | * Wake up the specified task | |
404 | */ | |
405 | static void __rpc_wake_up_task(struct rpc_task *task) | |
406 | { | |
407 | if (rpc_start_wakeup(task)) { | |
408 | if (RPC_IS_QUEUED(task)) | |
409 | __rpc_do_wake_up_task(task); | |
410 | rpc_finish_wakeup(task); | |
411 | } | |
412 | } | |
413 | ||
414 | /* | |
415 | * Default timeout handler if none specified by user | |
416 | */ | |
417 | static void | |
418 | __rpc_default_timer(struct rpc_task *task) | |
419 | { | |
420 | dprintk("RPC: %d timeout (default timer)\n", task->tk_pid); | |
421 | task->tk_status = -ETIMEDOUT; | |
422 | rpc_wake_up_task(task); | |
423 | } | |
424 | ||
425 | /* | |
426 | * Wake up the specified task | |
427 | */ | |
428 | void rpc_wake_up_task(struct rpc_task *task) | |
429 | { | |
430 | if (rpc_start_wakeup(task)) { | |
431 | if (RPC_IS_QUEUED(task)) { | |
432 | struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; | |
433 | ||
434 | spin_lock_bh(&queue->lock); | |
435 | __rpc_do_wake_up_task(task); | |
436 | spin_unlock_bh(&queue->lock); | |
437 | } | |
438 | rpc_finish_wakeup(task); | |
439 | } | |
440 | } | |
441 | ||
442 | /* | |
443 | * Wake up the next task on a priority queue. | |
444 | */ | |
445 | static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue) | |
446 | { | |
447 | struct list_head *q; | |
448 | struct rpc_task *task; | |
449 | ||
450 | /* | |
451 | * Service a batch of tasks from a single cookie. | |
452 | */ | |
453 | q = &queue->tasks[queue->priority]; | |
454 | if (!list_empty(q)) { | |
455 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); | |
456 | if (queue->cookie == task->tk_cookie) { | |
457 | if (--queue->nr) | |
458 | goto out; | |
459 | list_move_tail(&task->u.tk_wait.list, q); | |
460 | } | |
461 | /* | |
462 | * Check if we need to switch queues. | |
463 | */ | |
464 | if (--queue->count) | |
465 | goto new_cookie; | |
466 | } | |
467 | ||
468 | /* | |
469 | * Service the next queue. | |
470 | */ | |
471 | do { | |
472 | if (q == &queue->tasks[0]) | |
473 | q = &queue->tasks[queue->maxpriority]; | |
474 | else | |
475 | q = q - 1; | |
476 | if (!list_empty(q)) { | |
477 | task = list_entry(q->next, struct rpc_task, u.tk_wait.list); | |
478 | goto new_queue; | |
479 | } | |
480 | } while (q != &queue->tasks[queue->priority]); | |
481 | ||
482 | rpc_reset_waitqueue_priority(queue); | |
483 | return NULL; | |
484 | ||
485 | new_queue: | |
486 | rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); | |
487 | new_cookie: | |
488 | rpc_set_waitqueue_cookie(queue, task->tk_cookie); | |
489 | out: | |
490 | __rpc_wake_up_task(task); | |
491 | return task; | |
492 | } | |
493 | ||
494 | /* | |
495 | * Wake up the next task on the wait queue. | |
496 | */ | |
497 | struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | |
498 | { | |
499 | struct rpc_task *task = NULL; | |
500 | ||
501 | dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); | |
502 | spin_lock_bh(&queue->lock); | |
503 | if (RPC_IS_PRIORITY(queue)) | |
504 | task = __rpc_wake_up_next_priority(queue); | |
505 | else { | |
506 | task_for_first(task, &queue->tasks[0]) | |
507 | __rpc_wake_up_task(task); | |
508 | } | |
509 | spin_unlock_bh(&queue->lock); | |
510 | ||
511 | return task; | |
512 | } | |
513 | ||
514 | /** | |
515 | * rpc_wake_up - wake up all rpc_tasks | |
516 | * @queue: rpc_wait_queue on which the tasks are sleeping | |
517 | * | |
518 | * Grabs queue->lock | |
519 | */ | |
520 | void rpc_wake_up(struct rpc_wait_queue *queue) | |
521 | { | |
e6d83d55 | 522 | struct rpc_task *task, *next; |
1da177e4 | 523 | struct list_head *head; |
e6d83d55 | 524 | |
1da177e4 LT |
525 | spin_lock_bh(&queue->lock); |
526 | head = &queue->tasks[queue->maxpriority]; | |
527 | for (;;) { | |
e6d83d55 | 528 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) |
1da177e4 | 529 | __rpc_wake_up_task(task); |
1da177e4 LT |
530 | if (head == &queue->tasks[0]) |
531 | break; | |
532 | head--; | |
533 | } | |
534 | spin_unlock_bh(&queue->lock); | |
535 | } | |
536 | ||
537 | /** | |
538 | * rpc_wake_up_status - wake up all rpc_tasks and set their status value. | |
539 | * @queue: rpc_wait_queue on which the tasks are sleeping | |
540 | * @status: status value to set | |
541 | * | |
542 | * Grabs queue->lock | |
543 | */ | |
544 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | |
545 | { | |
e6d83d55 | 546 | struct rpc_task *task, *next; |
1da177e4 | 547 | struct list_head *head; |
1da177e4 LT |
548 | |
549 | spin_lock_bh(&queue->lock); | |
550 | head = &queue->tasks[queue->maxpriority]; | |
551 | for (;;) { | |
e6d83d55 | 552 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { |
1da177e4 LT |
553 | task->tk_status = status; |
554 | __rpc_wake_up_task(task); | |
555 | } | |
556 | if (head == &queue->tasks[0]) | |
557 | break; | |
558 | head--; | |
559 | } | |
560 | spin_unlock_bh(&queue->lock); | |
561 | } | |
562 | ||
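/*
 * Usage sketch (hypothetical caller): a transport that loses its
 * connection can fail every sleeper on a queue in a single pass.
 */
#if 0
	rpc_wake_up_status(&demo->pending, -ENOTCONN);
#endif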
8014793b TM |
563 | static void __rpc_atrun(struct rpc_task *task) |
564 | { | |
565 | rpc_wake_up_task(task); | |
566 | } | |
567 | ||
1da177e4 LT |
568 | /* |
569 | * Run a task at a later time | |
570 | */ | |
8014793b | 571 | void rpc_delay(struct rpc_task *task, unsigned long delay) |
1da177e4 LT |
572 | { |
573 | task->tk_timeout = delay; | |
574 | rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun); | |
575 | } | |
576 | ||
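/*
 * Usage sketch (hypothetical state name): a state-machine step that
 * hits a transient error can re-arm itself and back off briefly.
 */
#if 0
	task->tk_action = demo_retry_state;	/* resume here after the delay */
	rpc_delay(task, HZ >> 2);		/* sleep ~250ms on delay_queue */
#endif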
4ce70ada TM |
577 | /* |
578 | * Helper to call task->tk_ops->rpc_call_prepare | |
579 | */ | |
580 | static void rpc_prepare_task(struct rpc_task *task) | |
581 | { | |
582 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); | |
583 | } | |
584 | ||
d05fdb0c | 585 | /* |
963d8fe5 | 586 | * Helper that calls task->tk_ops->rpc_call_done if it exists |
d05fdb0c | 587 | */ |
abbcf28f | 588 | void rpc_exit_task(struct rpc_task *task) |
d05fdb0c | 589 | { |
abbcf28f | 590 | task->tk_action = NULL; |
963d8fe5 TM |
591 | if (task->tk_ops->rpc_call_done != NULL) { |
592 | task->tk_ops->rpc_call_done(task, task->tk_calldata); | |
d05fdb0c | 593 | if (task->tk_action != NULL) { |
abbcf28f TM |
594 | WARN_ON(RPC_ASSASSINATED(task)); |
595 | /* Always release the RPC slot and buffer memory */ | |
596 | xprt_release(task); | |
d05fdb0c TM |
597 | } |
598 | } | |
d05fdb0c | 599 | } |
abbcf28f | 600 | EXPORT_SYMBOL(rpc_exit_task); |
d05fdb0c | 601 | |
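/*
 * Sketch of why the tk_action re-check above matters (hypothetical
 * callback; rpc_restart_call() lives in clnt.c): rpc_call_done may
 * re-arm the state machine to retry, in which case the slot and buffer
 * must be released so the retried call can reserve them afresh.
 */
#if 0
static void demo_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status == -EJUKEBOX) {	/* server says "try again later" */
		rpc_restart_call(task);		/* re-arms tk_action */
		rpc_delay(task, 5 * HZ);
	}
}
#endif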
1da177e4 LT |
602 | /* |
603 | * This is the RPC `scheduler' (or rather, the finite state machine). | |
604 | */ | |
605 | static int __rpc_execute(struct rpc_task *task) | |
606 | { | |
607 | int status = 0; | |
608 | ||
609 | dprintk("RPC: %4d rpc_execute flgs %x\n", | |
610 | task->tk_pid, task->tk_flags); | |
611 | ||
612 | BUG_ON(RPC_IS_QUEUED(task)); | |
613 | ||
d05fdb0c | 614 | for (;;) { |
1da177e4 LT |
615 | /* |
616 | * Garbage collection of pending timers... | |
617 | */ | |
618 | rpc_delete_timer(task); | |
619 | ||
620 | /* | |
621 | * Execute any pending callback. | |
622 | */ | |
623 | if (RPC_DO_CALLBACK(task)) { | |
624 | /* Define a callback save pointer */ | |
625 | void (*save_callback)(struct rpc_task *); | |
626 | ||
627 | /* | |
628 | * If a callback exists, save it, reset it, | |
629 | * call it. | |
630 | * Saving and clearing it before the call ensures we don't | |
631 | * wipe out a new callback set from within the handler. | |
632 | * - Dave | |
633 | */ | |
634 | save_callback = task->tk_callback; | |
635 | task->tk_callback = NULL; | |
636 | lock_kernel(); | |
637 | save_callback(task); | |
638 | unlock_kernel(); | |
639 | } | |
640 | ||
641 | /* | |
642 | * Perform the next FSM step. | |
643 | * tk_action may be NULL when the task has been killed | |
644 | * by someone else. | |
645 | */ | |
646 | if (!RPC_IS_QUEUED(task)) { | |
abbcf28f | 647 | if (task->tk_action == NULL) |
1da177e4 | 648 | break; |
abbcf28f TM |
649 | lock_kernel(); |
650 | task->tk_action(task); | |
651 | unlock_kernel(); | |
1da177e4 LT |
652 | } |
653 | ||
654 | /* | |
655 | * Lockless check for whether task is sleeping or not. | |
656 | */ | |
657 | if (!RPC_IS_QUEUED(task)) | |
658 | continue; | |
659 | rpc_clear_running(task); | |
660 | if (RPC_IS_ASYNC(task)) { | |
661 | /* Careful! we may have raced... */ | |
662 | if (RPC_IS_QUEUED(task)) | |
663 | return 0; | |
664 | if (rpc_test_and_set_running(task)) | |
665 | return 0; | |
666 | continue; | |
667 | } | |
668 | ||
669 | /* sync task: sleep here */ | |
670 | dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid); | |
96651ab3 TM |
671 | /* Note: Caller should be using rpc_clnt_sigmask() */ |
672 | status = out_of_line_wait_on_bit(&task->tk_runstate, | |
673 | RPC_TASK_QUEUED, rpc_wait_bit_interruptible, | |
674 | TASK_INTERRUPTIBLE); | |
675 | if (status == -ERESTARTSYS) { | |
1da177e4 LT |
676 | /* |
677 | * When a sync task receives a signal, it exits with | |
678 | * -ERESTARTSYS. In order to catch any callbacks that | |
679 | * clean up after sleeping on some queue, we don't | |
680 | * break the loop here, but go around once more. | |
681 | */ | |
96651ab3 TM |
682 | dprintk("RPC: %4d got signal\n", task->tk_pid); |
683 | task->tk_flags |= RPC_TASK_KILLED; | |
684 | rpc_exit(task, -ERESTARTSYS); | |
685 | rpc_wake_up_task(task); | |
1da177e4 LT |
686 | } |
687 | rpc_set_running(task); | |
688 | dprintk("RPC: %4d sync task resuming\n", task->tk_pid); | |
689 | } | |
690 | ||
e60859ac | 691 | dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); |
1da177e4 LT |
692 | /* Release all resources associated with the task */ |
693 | rpc_release_task(task); | |
694 | return status; | |
695 | } | |
696 | ||
697 | /* | |
698 | * User-visible entry point to the scheduler. | |
699 | * | |
700 | * This may be called recursively if e.g. an async NFS task updates | |
701 | * the attributes and finds that dirty pages must be flushed. | |
702 | * NOTE: Upon exit of this function the task is guaranteed to be | |
703 | * released. In particular note that rpc_release_task() will have | |
704 | * been called, so your task memory may have been freed. | |
705 | */ | |
706 | int | |
707 | rpc_execute(struct rpc_task *task) | |
708 | { | |
44c28873 | 709 | rpc_set_active(task); |
1da177e4 LT |
710 | rpc_set_running(task); |
711 | return __rpc_execute(task); | |
712 | } | |
713 | ||
714 | static void rpc_async_schedule(void *arg) | |
715 | { | |
716 | __rpc_execute((struct rpc_task *)arg); | |
717 | } | |
718 | ||
02107148 CL |
719 | /** |
720 | * rpc_malloc - allocate an RPC buffer | |
721 | * @task: RPC task that will use this buffer | |
722 | * @size: requested byte size | |
1da177e4 LT |
723 | * |
724 | * We try to ensure that some NFS reads and writes can always proceed | |
725 | * by using a mempool when allocating 'small' buffers. | |
726 | * In order to avoid memory starvation triggering more writebacks of | |
727 | * NFS requests, we use GFP_NOFS rather than GFP_KERNEL. | |
728 | */ | |
02107148 | 729 | void * rpc_malloc(struct rpc_task *task, size_t size) |
1da177e4 | 730 | { |
02107148 | 731 | struct rpc_rqst *req = task->tk_rqstp; |
dd0fc66f | 732 | gfp_t gfp; |
1da177e4 LT |
733 | |
734 | if (task->tk_flags & RPC_TASK_SWAPPER) | |
735 | gfp = GFP_ATOMIC; | |
736 | else | |
737 | gfp = GFP_NOFS; | |
738 | ||
739 | if (size > RPC_BUFFER_MAXSIZE) { | |
02107148 CL |
740 | req->rq_buffer = kmalloc(size, gfp); |
741 | if (req->rq_buffer) | |
742 | req->rq_bufsize = size; | |
1da177e4 | 743 | } else { |
02107148 CL |
744 | req->rq_buffer = mempool_alloc(rpc_buffer_mempool, gfp); |
745 | if (req->rq_buffer) | |
746 | req->rq_bufsize = RPC_BUFFER_MAXSIZE; | |
1da177e4 | 747 | } |
02107148 | 748 | return req->rq_buffer; |
1da177e4 LT |
749 | } |
750 | ||
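/*
 * Calling-pattern sketch (an assumption about the transport side): the
 * buffer is attached to the request, so a caller only checks for NULL
 * and later releases it through rpc_free().
 */
#if 0
	if (rpc_malloc(task, bufsiz) == NULL)
		return -ENOMEM;		/* hypothetical caller retries later */
	/* ... encode the call into task->tk_rqstp->rq_buffer ... */
	rpc_free(task);
#endif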
02107148 CL |
751 | /** |
752 | * rpc_free - free buffer allocated via rpc_malloc | |
753 | * @task: RPC task with a buffer to be freed | |
754 | * | |
755 | */ | |
756 | void rpc_free(struct rpc_task *task) | |
1da177e4 | 757 | { |
02107148 CL |
758 | struct rpc_rqst *req = task->tk_rqstp; |
759 | ||
760 | if (req->rq_buffer) { | |
761 | if (req->rq_bufsize == RPC_BUFFER_MAXSIZE) | |
762 | mempool_free(req->rq_buffer, rpc_buffer_mempool); | |
1da177e4 | 763 | else |
02107148 CL |
764 | kfree(req->rq_buffer); |
765 | req->rq_buffer = NULL; | |
766 | req->rq_bufsize = 0; | |
1da177e4 LT |
767 | } |
768 | } | |
769 | ||
770 | /* | |
771 | * Creation and deletion of RPC task structures | |
772 | */ | |
963d8fe5 | 773 | void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
1da177e4 LT |
774 | { |
775 | memset(task, 0, sizeof(*task)); | |
776 | init_timer(&task->tk_timer); | |
777 | task->tk_timer.data = (unsigned long) task; | |
778 | task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer; | |
44c28873 | 779 | atomic_set(&task->tk_count, 1); |
1da177e4 LT |
780 | task->tk_client = clnt; |
781 | task->tk_flags = flags; | |
963d8fe5 | 782 | task->tk_ops = tk_ops; |
4ce70ada TM |
783 | if (tk_ops->rpc_call_prepare != NULL) |
784 | task->tk_action = rpc_prepare_task; | |
963d8fe5 | 785 | task->tk_calldata = calldata; |
1da177e4 LT |
786 | |
787 | /* Initialize retry counters */ | |
788 | task->tk_garb_retry = 2; | |
789 | task->tk_cred_retry = 2; | |
790 | ||
791 | task->tk_priority = RPC_PRIORITY_NORMAL; | |
792 | task->tk_cookie = (unsigned long)current; | |
793 | ||
794 | /* Initialize workqueue for async tasks */ | |
795 | task->tk_workqueue = rpciod_workqueue; | |
1da177e4 LT |
796 | |
797 | if (clnt) { | |
798 | atomic_inc(&clnt->cl_users); | |
799 | if (clnt->cl_softrtry) | |
800 | task->tk_flags |= RPC_TASK_SOFT; | |
801 | if (!clnt->cl_intr) | |
802 | task->tk_flags |= RPC_TASK_NOINTR; | |
803 | } | |
804 | ||
963d8fe5 TM |
805 | BUG_ON(task->tk_ops == NULL); |
806 | ||
ef759a2e CL |
807 | /* starting timestamp */ |
808 | task->tk_start = jiffies; | |
809 | ||
1da177e4 LT |
810 | dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, |
811 | current->pid); | |
812 | } | |
813 | ||
814 | static struct rpc_task * | |
815 | rpc_alloc_task(void) | |
816 | { | |
817 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); | |
818 | } | |
819 | ||
963d8fe5 | 820 | static void rpc_free_task(struct rpc_task *task) |
1da177e4 LT |
821 | { |
822 | dprintk("RPC: %4d freeing task\n", task->tk_pid); | |
823 | mempool_free(task, rpc_task_mempool); | |
824 | } | |
825 | ||
826 | /* | |
827 | * Create a new task for the specified client. We have to | |
828 | * clean up after an allocation failure, as the client may | |
829 | * have specified "oneshot". | |
830 | */ | |
963d8fe5 | 831 | struct rpc_task *rpc_new_task(struct rpc_clnt *clnt, int flags, const struct rpc_call_ops *tk_ops, void *calldata) |
1da177e4 LT |
832 | { |
833 | struct rpc_task *task; | |
834 | ||
835 | task = rpc_alloc_task(); | |
836 | if (!task) | |
837 | goto cleanup; | |
838 | ||
963d8fe5 | 839 | rpc_init_task(task, clnt, flags, tk_ops, calldata); |
1da177e4 LT |
840 | |
841 | dprintk("RPC: %4d allocated task\n", task->tk_pid); | |
842 | task->tk_flags |= RPC_TASK_DYNAMIC; | |
843 | out: | |
844 | return task; | |
845 | ||
846 | cleanup: | |
847 | /* Check whether to release the client */ | |
848 | if (clnt) { | |
849 | printk("rpc_new_task: failed, users=%d, oneshot=%d\n", | |
850 | atomic_read(&clnt->cl_users), clnt->cl_oneshot); | |
851 | atomic_inc(&clnt->cl_users); /* pretend we were used ... */ | |
852 | rpc_release_client(clnt); | |
853 | } | |
854 | goto out; | |
855 | } | |
856 | ||
e6b3c4db TM |
857 | |
858 | void rpc_put_task(struct rpc_task *task) | |
1da177e4 | 859 | { |
963d8fe5 TM |
860 | const struct rpc_call_ops *tk_ops = task->tk_ops; |
861 | void *calldata = task->tk_calldata; | |
1da177e4 | 862 | |
e6b3c4db TM |
863 | if (!atomic_dec_and_test(&task->tk_count)) |
864 | return; | |
865 | /* Release resources */ | |
866 | if (task->tk_rqstp) | |
867 | xprt_release(task); | |
868 | if (task->tk_msg.rpc_cred) | |
869 | rpcauth_unbindcred(task); | |
870 | if (task->tk_client) { | |
871 | rpc_release_client(task->tk_client); | |
872 | task->tk_client = NULL; | |
873 | } | |
874 | if (task->tk_flags & RPC_TASK_DYNAMIC) | |
875 | rpc_free_task(task); | |
876 | if (tk_ops->rpc_release) | |
877 | tk_ops->rpc_release(calldata); | |
878 | } | |
879 | EXPORT_SYMBOL(rpc_put_task); | |
880 | ||
881 | void rpc_release_task(struct rpc_task *task) | |
882 | { | |
1da177e4 LT |
883 | #ifdef RPC_DEBUG |
884 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | |
885 | #endif | |
44c28873 | 886 | dprintk("RPC: %4d release task\n", task->tk_pid); |
1da177e4 LT |
887 | |
888 | /* Remove from global task list */ | |
889 | spin_lock(&rpc_sched_lock); | |
890 | list_del(&task->tk_task); | |
891 | spin_unlock(&rpc_sched_lock); | |
892 | ||
893 | BUG_ON(RPC_IS_QUEUED(task)); | |
1da177e4 LT |
894 | |
895 | /* Synchronously delete any running timer */ | |
896 | rpc_delete_timer(task); | |
897 | ||
1da177e4 LT |
898 | #ifdef RPC_DEBUG |
899 | task->tk_magic = 0; | |
900 | #endif | |
e6b3c4db TM |
901 | /* Wake up anyone who is waiting for task completion */ |
902 | rpc_mark_complete_task(task); | |
903 | ||
904 | rpc_put_task(task); | |
1da177e4 LT |
905 | } |
906 | ||
44c28873 TM |
907 | /** |
908 | * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it | |
99acf044 MW |
909 | * @clnt: pointer to RPC client |
910 | * @flags: RPC flags | |
911 | * @ops: RPC call ops | |
912 | * @data: user call data | |
44c28873 TM |
913 | */ |
914 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | |
915 | const struct rpc_call_ops *ops, | |
916 | void *data) | |
917 | { | |
918 | struct rpc_task *task; | |
919 | task = rpc_new_task(clnt, flags, ops, data); | |
7a1218a2 TM |
920 | if (task == NULL) { |
921 | if (ops->rpc_release != NULL) | |
922 | ops->rpc_release(data); | |
44c28873 | 923 | return ERR_PTR(-ENOMEM); |
7a1218a2 | 924 | } |
44c28873 TM |
925 | atomic_inc(&task->tk_count); |
926 | rpc_execute(task); | |
927 | return task; | |
928 | } | |
929 | EXPORT_SYMBOL(rpc_run_task); | |
930 | ||
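/*
 * End-to-end sketch (all demo_* names are hypothetical): fire off an
 * asynchronous call, then drop the extra reference that rpc_run_task()
 * took via atomic_inc() above once the caller no longer needs the task.
 */
#if 0
static const struct rpc_call_ops demo_ops = {
	.rpc_call_done	= demo_call_done,
	.rpc_release	= demo_release,
};

static int demo_async_call(struct rpc_clnt *clnt, void *calldata)
{
	struct rpc_task *task;

	task = rpc_run_task(clnt, RPC_TASK_ASYNC, &demo_ops, calldata);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
#endif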
1da177e4 LT |
931 | /* |
932 | * Kill all tasks for the given client. | |
933 | * XXX: kill their descendants as well? | |
934 | */ | |
935 | void rpc_killall_tasks(struct rpc_clnt *clnt) | |
936 | { | |
937 | struct rpc_task *rovr; | |
938 | struct list_head *le; | |
939 | ||
940 | dprintk("RPC: killing all tasks for client %p\n", clnt); | |
941 | ||
942 | /* | |
943 | * Spin lock all_tasks to prevent changes... | |
944 | */ | |
945 | spin_lock(&rpc_sched_lock); | |
946 | alltask_for_each(rovr, le, &all_tasks) { | |
947 | if (! RPC_IS_ACTIVATED(rovr)) | |
948 | continue; | |
949 | if (!clnt || rovr->tk_client == clnt) { | |
950 | rovr->tk_flags |= RPC_TASK_KILLED; | |
951 | rpc_exit(rovr, -EIO); | |
952 | rpc_wake_up_task(rovr); | |
953 | } | |
954 | } | |
955 | spin_unlock(&rpc_sched_lock); | |
956 | } | |
957 | ||
958 | static DECLARE_MUTEX_LOCKED(rpciod_running); | |
959 | ||
960 | static void rpciod_killall(void) | |
961 | { | |
962 | unsigned long flags; | |
963 | ||
964 | while (!list_empty(&all_tasks)) { | |
965 | clear_thread_flag(TIF_SIGPENDING); | |
966 | rpc_killall_tasks(NULL); | |
967 | flush_workqueue(rpciod_workqueue); | |
968 | if (!list_empty(&all_tasks)) { | |
969 | dprintk("rpciod_killall: waiting for tasks to exit\n"); | |
970 | yield(); | |
971 | } | |
972 | } | |
973 | ||
974 | spin_lock_irqsave(¤t->sighand->siglock, flags); | |
975 | recalc_sigpending(); | |
976 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | |
977 | } | |
978 | ||
979 | /* | |
980 | * Start up rpciod (the workqueue) if it's not already running. | |
981 | */ | |
982 | int | |
983 | rpciod_up(void) | |
984 | { | |
985 | struct workqueue_struct *wq; | |
986 | int error = 0; | |
987 | ||
4a3e2f71 | 988 | mutex_lock(&rpciod_mutex); |
1da177e4 LT |
989 | dprintk("rpciod_up: users %d\n", rpciod_users); |
990 | rpciod_users++; | |
991 | if (rpciod_workqueue) | |
992 | goto out; | |
993 | /* | |
994 | * If there's no workqueue yet, we should be the first user. | |
995 | */ | |
996 | if (rpciod_users > 1) | |
997 | printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users); | |
998 | /* | |
999 | * Create the rpciod workqueue. | |
1000 | */ | |
1001 | error = -ENOMEM; | |
1002 | wq = create_workqueue("rpciod"); | |
1003 | if (wq == NULL) { | |
1004 | printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error); | |
1005 | rpciod_users--; | |
1006 | goto out; | |
1007 | } | |
1008 | rpciod_workqueue = wq; | |
1009 | error = 0; | |
1010 | out: | |
4a3e2f71 | 1011 | mutex_unlock(&rpciod_mutex); |
1da177e4 LT |
1012 | return error; |
1013 | } | |
1014 | ||
1015 | void | |
1016 | rpciod_down(void) | |
1017 | { | |
4a3e2f71 | 1018 | mutex_lock(&rpciod_mutex); |
1da177e4 LT |
1019 | dprintk("rpciod_down sema %d\n", rpciod_users); |
1020 | if (rpciod_users) { | |
1021 | if (--rpciod_users) | |
1022 | goto out; | |
1023 | } else | |
1024 | printk(KERN_WARNING "rpciod_down: no users??\n"); | |
1025 | ||
1026 | if (!rpciod_workqueue) { | |
1027 | dprintk("rpciod_down: Nothing to do!\n"); | |
1028 | goto out; | |
1029 | } | |
1030 | rpciod_killall(); | |
1031 | ||
1032 | destroy_workqueue(rpciod_workqueue); | |
1033 | rpciod_workqueue = NULL; | |
1034 | out: | |
4a3e2f71 | 1035 | mutex_unlock(&rpciod_mutex); |
1da177e4 LT |
1036 | } |
1037 | ||
1038 | #ifdef RPC_DEBUG | |
1039 | void rpc_show_tasks(void) | |
1040 | { | |
1041 | struct list_head *le; | |
1042 | struct rpc_task *t; | |
1043 | ||
1044 | spin_lock(&rpc_sched_lock); | |
1045 | if (list_empty(&all_tasks)) { | |
1046 | spin_unlock(&rpc_sched_lock); | |
1047 | return; | |
1048 | } | |
1049 | printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout " | |
963d8fe5 | 1050 | "-rpcwait -action- ---ops--\n"); |
1da177e4 LT |
1051 | alltask_for_each(t, le, &all_tasks) { |
1052 | const char *rpc_waitq = "none"; | |
1053 | ||
1054 | if (RPC_IS_QUEUED(t)) | |
1055 | rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); | |
1056 | ||
1057 | printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n", | |
1058 | t->tk_pid, | |
1059 | (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1), | |
1060 | t->tk_flags, t->tk_status, | |
1061 | t->tk_client, | |
1062 | (t->tk_client ? t->tk_client->cl_prog : 0), | |
1063 | t->tk_rqstp, t->tk_timeout, | |
1064 | rpc_waitq, | |
963d8fe5 | 1065 | t->tk_action, t->tk_ops); |
1da177e4 LT |
1066 | } |
1067 | spin_unlock(&rpc_sched_lock); | |
1068 | } | |
1069 | #endif | |
1070 | ||
1071 | void | |
1072 | rpc_destroy_mempool(void) | |
1073 | { | |
1074 | if (rpc_buffer_mempool) | |
1075 | mempool_destroy(rpc_buffer_mempool); | |
1076 | if (rpc_task_mempool) | |
1077 | mempool_destroy(rpc_task_mempool); | |
1a1d92c1 AD |
1078 | if (rpc_task_slabp) |
1079 | kmem_cache_destroy(rpc_task_slabp); | |
1080 | if (rpc_buffer_slabp) | |
1081 | kmem_cache_destroy(rpc_buffer_slabp); | |
1da177e4 LT |
1082 | } |
1083 | ||
1084 | int | |
1085 | rpc_init_mempool(void) | |
1086 | { | |
1087 | rpc_task_slabp = kmem_cache_create("rpc_tasks", | |
1088 | sizeof(struct rpc_task), | |
1089 | 0, SLAB_HWCACHE_ALIGN, | |
1090 | NULL, NULL); | |
1091 | if (!rpc_task_slabp) | |
1092 | goto err_nomem; | |
1093 | rpc_buffer_slabp = kmem_cache_create("rpc_buffers", | |
1094 | RPC_BUFFER_MAXSIZE, | |
1095 | 0, SLAB_HWCACHE_ALIGN, | |
1096 | NULL, NULL); | |
1097 | if (!rpc_buffer_slabp) | |
1098 | goto err_nomem; | |
93d2341c MD |
1099 | rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, |
1100 | rpc_task_slabp); | |
1da177e4 LT |
1101 | if (!rpc_task_mempool) |
1102 | goto err_nomem; | |
93d2341c MD |
1103 | rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, |
1104 | rpc_buffer_slabp); | |
1da177e4 LT |
1105 | if (!rpc_buffer_mempool) |
1106 | goto err_nomem; | |
1107 | return 0; | |
1108 | err_nomem: | |
1109 | rpc_destroy_mempool(); | |
1110 | return -ENOMEM; | |
1111 | } |