Btrfs: Wait for kernel threads to make progress during async submission
[linux-2.6-block.git] fs/btrfs/async-thread.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* counts picks from the busy list; used by next_worker to rotate it */
        unsigned long sequence;

        /* protects the pending list. */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};
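
/*
 * bit 0 of a btrfs_work's flags marks the item as queued: btrfs_queue_worker
 * and btrfs_requeue_work set it with test_and_set_bit so an item is never
 * put on two lists, and worker_loop clears it once the item has been pulled
 * off a pending list.
 */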

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;
                list_move(&worker->worker_list, &worker->workers->idle_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}
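
/*
 * note the hysteresis between the two helpers above: it takes a full
 * idle_thresh pending items to mark a thread busy, but the count must drop
 * below idle_thresh / 2 before it is considered idle again.  This keeps
 * threads from bouncing between the idle and busy lists on every request.
 */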

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head *cur;
        struct btrfs_work *work;
        do {
                spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->pending)) {
                        cur = worker->pending.next;
                        work = list_entry(cur, struct btrfs_work, list);
                        list_del(&work->list);
                        clear_bit(0, &work->flags);

                        work->worker = worker;
                        spin_unlock_irq(&worker->lock);

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        spin_lock_irq(&worker->lock);
                        check_idle_worker(worker);
                }
                worker->working = 0;
                if (freezing(current)) {
                        /* refrigerator() sleeps, so drop the lock first */
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&worker->lock);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

/*
 * this will wait for all the worker threads to shut down.  The caller must
 * make sure no new work is queued while this runs; the worker lists are
 * walked here without taking workers->lock.
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;

        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);
                kthread_stop(worker->task);
                list_del(&worker->worker_list);
                kfree(worker);
        }
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
        workers->num_workers = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        spin_lock_init(&workers->lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
                atomic_set(&worker->num_pending, 0);
                worker->task = kthread_run(worker_loop, worker,
                                           "btrfs-%s-%d", workers->name,
                                           workers->num_workers + i);
                worker->workers = workers;
                if (IS_ERR(worker->task)) {
                        /* grab the error code before freeing the worker */
                        ret = PTR_ERR(worker->task);
                        kfree(worker);
                        goto fail;
                }

                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                worker->idle = 1;
                workers->num_workers++;
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min = workers->num_workers < workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * working
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced.  The
         * move only happens every fourth pick, so bursts of submissions
         * tend to land on the same thread.
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        atomic_inc(&worker->num_pending);
        worker->sequence++;
        if (worker->sequence % 4 == 0)
                list_move_tail(next, &workers->worker_list);
        return worker;
}

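/*
 * pick a worker for a new piece of work.  This reuses whatever next_worker
 * finds, starts another thread if we are still below max_workers, and as a
 * last resort falls back to any thread it can find on the lists.
 */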
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
        spin_unlock_irqrestore(&workers->lock, flags);

        if (!worker) {
                spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        struct list_head *fallback = NULL;
                        /*
                         * we have failed to find any workers, just
                         * fall back to the first one we can find
                         */
                        if (!list_empty(&workers->worker_list))
                                fallback = workers->worker_list.next;
                        if (!list_empty(&workers->idle_list))
                                fallback = workers->idle_list.next;
                        BUG_ON(!fallback);
                        worker = list_entry(fallback,
                                  struct btrfs_worker_thread, worker_list);
                        spin_unlock_irqrestore(&workers->lock, flags);
                } else {
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;

        if (test_and_set_bit(0, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(0, &work->flags))
                goto out;

        worker = find_worker(workers);

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        check_busy_worker(worker);
        list_add_tail(&work->list, &worker->pending);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        spin_unlock_irqrestore(&worker->lock, flags);

        if (wake)
                wake_up_process(worker->task);
out:
        return 0;
}
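
/*
 * Example of how a caller might use this pool (an illustrative sketch; the
 * names my_pool, my_work, my_func and my_setup are hypothetical, and real
 * callers typically embed the btrfs_work in their own request structure):
 *
 *      static struct btrfs_workers my_pool;
 *
 *      static void my_func(struct btrfs_work *work)
 *      {
 *              ... do the deferred processing; a long running func can
 *                  call btrfs_requeue_work(work) to give up the cpu ...
 *      }
 *
 *      static struct btrfs_work my_work = { .func = my_func };
 *
 *      static int my_setup(void)
 *      {
 *              btrfs_init_workers(&my_pool, "example", 8);
 *              btrfs_start_workers(&my_pool, 1);
 *              btrfs_queue_worker(&my_pool, &my_work);
 *              return 0;
 *      }
 *
 * and btrfs_stop_workers(&my_pool) shuts the threads down again at teardown
 * time.
 */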