/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

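/*
 * work->flags bits, as managed below: QUEUED is set from queue time
 * until a worker dequeues the item, DONE once work->func has run, and
 * ORDER_DONE once the ordered hooks have been called.  HIGH_PRIO routes
 * the item onto the priority lists.
 */
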
/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;
        struct list_head prio_pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* reference counter for this struct */
        atomic_t refs;

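        /*
         * incremented each time this thread is handed new work; used by
         * next_worker() to rotate busy threads in batches of idle_thresh
         */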
        unsigned long sequence;

        /* protects the pending list. */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queues.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
        struct btrfs_work work;
        struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
        struct worker_start *start;
        start = container_of(work, struct worker_start, work);
        btrfs_start_workers(start->queue, 1);
        kfree(start);
}

static int start_new_worker(struct btrfs_workers *queue)
{
        struct worker_start *start;
        int ret;

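        /* GFP_NOFS: this path runs while the fs may be cleaning dirty
         * memory, so the allocation must not recurse into fs reclaim */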
        start = kzalloc(sizeof(*start), GFP_NOFS);
        if (!start)
                return -ENOMEM;

        start->work.func = start_new_worker_func;
        start->queue = queue;
        ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
        if (ret)
                kfree(start);
        return ret;
}

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;

                /* the list may be empty if the worker is just starting */
                if (!list_empty(&worker->worker_list)) {
                        list_move(&worker->worker_list,
                                  &worker->workers->idle_list);
                }
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
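 *
 * note the hysteresis against check_idle_worker(): a thread only goes
 * idle once its backlog falls below idle_thresh / 2, but must climb back
 * to idle_thresh before it counts as busy again, so it doesn't bounce
 * between the two lists on every queued item.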
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;

                if (!list_empty(&worker->worker_list)) {
                        list_move_tail(&worker->worker_list,
                                       &worker->workers->worker_list);
                }
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

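/*
 * if find_worker() wanted a new thread but could not start one without
 * blocking, it leaves atomic_start_pending set; consume that request
 * here and hand it off to the helper queue via start_new_worker().
 */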
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
        struct btrfs_workers *workers = worker->workers;
        unsigned long flags;

        rmb();
        if (!workers->atomic_start_pending)
                return;

        spin_lock_irqsave(&workers->lock, flags);
        if (!workers->atomic_start_pending)
                goto out;

        workers->atomic_start_pending = 0;
        if (workers->num_workers + workers->num_workers_starting >=
            workers->max_workers)
                goto out;

        workers->num_workers_starting += 1;
        spin_unlock_irqrestore(&workers->lock, flags);
        start_new_worker(workers);
        return;

out:
        spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline int run_ordered_completions(struct btrfs_workers *workers,
                                            struct btrfs_work *work)
{
        if (!workers->ordered)
                return 0;

        set_bit(WORK_DONE_BIT, &work->flags);

        spin_lock(&workers->order_lock);

        while (1) {
                if (!list_empty(&workers->prio_order_list)) {
                        work = list_entry(workers->prio_order_list.next,
                                          struct btrfs_work, order_list);
                } else if (!list_empty(&workers->order_list)) {
                        work = list_entry(workers->order_list.next,
                                          struct btrfs_work, order_list);
                } else {
                        break;
                }
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /* we are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;

                spin_unlock(&workers->order_lock);

                work->ordered_func(work);

                /* now take the lock again and call the freeing code */
                spin_lock(&workers->order_lock);
                list_del(&work->order_list);
                work->ordered_free(work);
        }

        spin_unlock(&workers->order_lock);
        return 0;
}

static void put_worker(struct btrfs_worker_thread *worker)
{
        if (atomic_dec_and_test(&worker->refs))
                kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
        int freeit = 0;

        spin_lock_irq(&worker->lock);
        spin_lock(&worker->workers->lock);
        if (worker->workers->num_workers > 1 &&
            worker->idle &&
            !worker->working &&
            !list_empty(&worker->worker_list) &&
            list_empty(&worker->prio_pending) &&
            list_empty(&worker->pending) &&
            atomic_read(&worker->num_pending) == 0) {
                freeit = 1;
                list_del_init(&worker->worker_list);
                worker->workers->num_workers--;
        }
        spin_unlock(&worker->workers->lock);
        spin_unlock_irq(&worker->lock);

        if (freeit)
                put_worker(worker);
        return freeit;
}

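/*
 * grab work in batches: splice this worker's shared pending lists onto
 * local lists that need no locking, and hand items out from those, so
 * worker->lock is only taken when both local lists run dry.  the shared
 * prio_pending list is re-checked first so new high priority work can
 * preempt the cached batch.
 */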
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
                                        struct list_head *prio_head,
                                        struct list_head *head)
{
        struct btrfs_work *work = NULL;
        struct list_head *cur = NULL;

        if (!list_empty(prio_head))
                cur = prio_head->next;

        smp_mb();
        if (!list_empty(&worker->prio_pending))
                goto refill;

        if (!list_empty(head))
                cur = head->next;

        if (cur)
                goto out;

refill:
        spin_lock_irq(&worker->lock);
        list_splice_tail_init(&worker->prio_pending, prio_head);
        list_splice_tail_init(&worker->pending, head);

        if (!list_empty(prio_head))
                cur = prio_head->next;
        else if (!list_empty(head))
                cur = head->next;
        spin_unlock_irq(&worker->lock);

        if (!cur)
                goto out_fail;

out:
        work = list_entry(cur, struct btrfs_work, list);

out_fail:
        return work;
}

/*
 * main loop for servicing work items
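 *
 * when the queues are empty the loop backs off in stages: a brief
 * cpu_relax() spin, then a one jiffy schedule_timeout(), then a real
 * interruptible sleep.  after 120 seconds with no work the thread tries
 * to exit via try_worker_shutdown().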
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head head;
        struct list_head prio_head;
        struct btrfs_work *work;

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&prio_head);

        do {
again:
                while (1) {
                        work = get_next_work(worker, &prio_head, &head);
                        if (!work)
                                break;

                        list_del(&work->list);
                        clear_bit(WORK_QUEUED_BIT, &work->flags);

                        work->worker = worker;

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        /*
                         * unless this is an ordered work queue,
                         * 'work' was probably freed by func above.
                         */
                        run_ordered_completions(worker->workers, work);

                        check_pending_worker_creates(worker);
                }

                spin_lock_irq(&worker->lock);
                check_idle_worker(worker);

                if (freezing(current)) {
                        worker->working = 0;
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        spin_unlock_irq(&worker->lock);
                        if (!kthread_should_stop()) {
                                cpu_relax();
                                /*
                                 * we've dropped the lock, did someone else
                                 * jump in?
                                 */
                                smp_mb();
                                if (!list_empty(&worker->pending) ||
                                    !list_empty(&worker->prio_pending))
                                        continue;

                                /*
                                 * this short schedule allows more work to
                                 * come in without the queue functions
                                 * needing to go through wake_up_process()
                                 *
                                 * worker->working is still 1, so nobody
                                 * is going to try and wake us up
                                 */
                                schedule_timeout(1);
                                smp_mb();
                                if (!list_empty(&worker->pending) ||
                                    !list_empty(&worker->prio_pending))
                                        continue;

                                if (kthread_should_stop())
                                        break;

                                /* still no more work? sleep for real */
                                spin_lock_irq(&worker->lock);
                                set_current_state(TASK_INTERRUPTIBLE);
                                if (!list_empty(&worker->pending) ||
                                    !list_empty(&worker->prio_pending)) {
                                        spin_unlock_irq(&worker->lock);
                                        goto again;
                                }

                                /*
                                 * this makes sure we get a wakeup when someone
                                 * adds something new to the queue
                                 */
                                worker->working = 0;
                                spin_unlock_irq(&worker->lock);

                                if (!kthread_should_stop()) {
                                        schedule_timeout(HZ * 120);
                                        if (!worker->working &&
                                            try_worker_shutdown(worker)) {
                                                return 0;
                                        }
                                }
                        }
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;
        int can_stop;

        spin_lock_irq(&workers->lock);
        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);

                atomic_inc(&worker->refs);
                workers->num_workers -= 1;
                if (!list_empty(&worker->worker_list)) {
                        list_del_init(&worker->worker_list);
                        put_worker(worker);
                        can_stop = 1;
                } else
                        can_stop = 0;
                spin_unlock_irq(&workers->lock);
                if (can_stop)
                        kthread_stop(worker->task);
                spin_lock_irq(&workers->lock);
                put_worker(worker);
        }
        spin_unlock_irq(&workers->lock);
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                        struct btrfs_workers *async_helper)
{
        workers->num_workers = 0;
        workers->num_workers_starting = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        INIT_LIST_HEAD(&workers->order_list);
        INIT_LIST_HEAD(&workers->prio_order_list);
        spin_lock_init(&workers->lock);
        spin_lock_init(&workers->order_lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
        workers->ordered = 0;
        workers->atomic_start_pending = 0;
        workers->atomic_worker_start = async_helper;
}

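/*
 * Example setup (a minimal sketch; the fs_info fields and pool names are
 * illustrative, loosely following how callers such as fs/btrfs/disk-io.c
 * wire pools together):
 *
 *      btrfs_init_workers(&fs_info->generic_worker, "genwork", 1, NULL);
 *      btrfs_init_workers(&fs_info->workers, "worker",
 *                         fs_info->thread_pool_size,
 *                         &fs_info->generic_worker);
 *      btrfs_start_workers(&fs_info->workers, 1);
 *
 * the async_helper argument is the queue used for deferred thread
 * starts, as described in the comment above struct worker_start.
 */
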
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers,
                                 int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->prio_pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);

                atomic_set(&worker->num_pending, 0);
                atomic_set(&worker->refs, 1);
                worker->workers = workers;
                worker->task = kthread_run(worker_loop, worker,
                                           "btrfs-%s-%d", workers->name,
                                           workers->num_workers + i);
                if (IS_ERR(worker->task)) {
                        ret = PTR_ERR(worker->task);
                        kfree(worker);
                        goto fail;
                }
                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                worker->idle = 1;
                workers->num_workers++;
                workers->num_workers_starting--;
                WARN_ON(workers->num_workers_starting < 0);
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        spin_lock_irq(&workers->lock);
        workers->num_workers_starting += num_workers;
        spin_unlock_irq(&workers->lock);
        return __btrfs_start_workers(workers, num_workers);
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min;

        enforce_min = (workers->num_workers + workers->num_workers_starting) <
                workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * working
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced.
         * Do the move in batches based on the sequence number.  This groups
         * requests submitted at roughly the same time onto the same worker.
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        worker->sequence++;

        if (worker->sequence % workers->idle_thresh == 0)
                list_move_tail(next, &workers->worker_list);
        return worker;
}

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        struct list_head *fallback;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);

        if (!worker) {
                if (workers->num_workers + workers->num_workers_starting >=
                    workers->max_workers) {
                        goto fallback;
                } else if (workers->atomic_worker_start) {
                        workers->atomic_start_pending = 1;
                        goto fallback;
                } else {
                        workers->num_workers_starting++;
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        __btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        goto found;

fallback:
        fallback = NULL;
        /*
         * we have failed to find any workers, just
         * return the first one we can find.
         */
        if (!list_empty(&workers->worker_list))
                fallback = workers->worker_list.next;
        if (!list_empty(&workers->idle_list))
                fallback = workers->idle_list.next;
        BUG_ON(!fallback);
        worker = list_entry(fallback,
                            struct btrfs_worker_thread, worker_list);
found:
        /*
         * this makes sure the worker doesn't exit before it is placed
         * onto a busy/idle list
         */
        atomic_inc(&worker->num_pending);
        spin_unlock_irqrestore(&workers->lock, flags);
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;
        int wake = 0;

        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
                list_add_tail(&work->list, &worker->prio_pending);
        else
                list_add_tail(&work->list, &worker->pending);
        atomic_inc(&worker->num_pending);

        /* by definition we're busy, take ourselves off the idle
         * list
         */
        if (worker->idle) {
                spin_lock(&worker->workers->lock);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock(&worker->workers->lock);
        }
        if (!worker->working) {
                wake = 1;
                worker->working = 1;
        }

        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}

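/*
 * mark a work item high priority; callers must do this before queueing,
 * since the flag decides which pending and order lists the item joins.
 */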
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
                goto out;

        worker = find_worker(workers);
        if (workers->ordered) {
                /*
                 * you're not allowed to do ordered queues from an
                 * interrupt handler
                 */
                spin_lock(&workers->order_lock);
                if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
                        list_add_tail(&work->order_list,
                                      &workers->prio_order_list);
                } else {
                        list_add_tail(&work->order_list, &workers->order_list);
                }
                spin_unlock(&workers->order_lock);
        } else {
                INIT_LIST_HEAD(&work->order_list);
        }

        spin_lock_irqsave(&worker->lock, flags);

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
                list_add_tail(&work->list, &worker->prio_pending);
        else
                list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);

out:
        return 0;
}
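
/*
 * Example: deferring a call through a pool (a minimal sketch; my_work,
 * my_func and the fs_info field are illustrative names, not part of
 * this file):
 *
 *      struct my_work {
 *              struct btrfs_work work;
 *              int arg;
 *      };
 *
 *      static void my_func(struct btrfs_work *work)
 *      {
 *              struct my_work *mw = container_of(work, struct my_work, work);
 *              ... use mw->arg, then kfree(mw) ...
 *      }
 *
 *      struct my_work *mw = kzalloc(sizeof(*mw), GFP_NOFS);
 *      mw->work.func = my_func;
 *      mw->arg = 42;
 *      btrfs_queue_worker(&fs_info->workers, &mw->work);
 *
 * for ordered pools (workers->ordered set) also fill in
 * work->ordered_func and work->ordered_free, which run in queue order
 * via run_ordered_completions() above.
 */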