/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        unsigned long sequence;

        /* protects the pending list. */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};
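
/*
 * struct btrfs_work itself lives in async-thread.h.  As a rough sketch of
 * the fields this file relies on (the header is authoritative, not this
 * comment):
 *
 *	struct btrfs_work {
 *		void (*func)(struct btrfs_work *work);	- called by worker_loop
 *		unsigned long flags;			- bit 0 set while queued
 *		struct btrfs_worker_thread *worker;	- thread servicing us
 *		struct list_head list;			- link on worker->pending
 *	};
 */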

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;
                list_move(&worker->worker_list, &worker->workers->idle_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}
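
/*
 * note the hysteresis between the two helpers above: a thread moves to the
 * idle list only once num_pending drops below idle_thresh / 2, but does not
 * move back to the busy list until num_pending climbs to idle_thresh again.
 * The gap keeps a worker hovering near the threshold from bouncing between
 * the two lists on every queue and dequeue.
 */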

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head *cur;
        struct btrfs_work *work;

        do {
                spin_lock_irq(&worker->lock);
                while(!list_empty(&worker->pending)) {
                        cur = worker->pending.next;
                        work = list_entry(cur, struct btrfs_work, list);
                        list_del(&work->list);
                        clear_bit(0, &work->flags);

                        work->worker = worker;
                        spin_unlock_irq(&worker->lock);

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        spin_lock_irq(&worker->lock);
                        check_idle_worker(worker);
                }
                worker->working = 0;
                if (freezing(current)) {
                        /* drop the lock before sleeping in the freezer */
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&worker->lock);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
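
/*
 * locking in worker_loop: worker->lock protects the pending list, so it is
 * held while items are pulled off, dropped around the (potentially long)
 * call into work->func(), and dropped again before the thread sleeps.
 */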

/*
 * this will wait for all the worker threads to shutdown
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;

        list_splice_init(&workers->idle_list, &workers->worker_list);
        while(!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);
                kthread_stop(worker->task);
                list_del(&worker->worker_list);
                kfree(worker);
        }
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
        workers->num_workers = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        spin_lock_init(&workers->lock);
        workers->max_workers = max;
        workers->idle_thresh = 32;
        workers->name = name;
}
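
/*
 * idle_thresh is an ordinary field, so callers that want snappier or lazier
 * idling can override the default of 32 after btrfs_init_workers() and
 * before queueing any work.
 */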

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
                atomic_set(&worker->num_pending, 0);
                worker->task = kthread_run(worker_loop, worker,
                                           "btrfs-%s-%d", workers->name,
                                           workers->num_workers + i);
                worker->workers = workers;
                if (IS_ERR(worker->task)) {
                        /* grab the error before freeing the worker */
                        ret = PTR_ERR(worker->task);
                        kfree(worker);
                        goto fail;
                }

                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                worker->idle = 1;
                workers->num_workers++;
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min = workers->num_workers < workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * idle.
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced.
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        atomic_inc(&worker->num_pending);
        worker->sequence++;
        if (worker->sequence % 4 == 0)
                list_move_tail(next, &workers->worker_list);
        return worker;
}
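
/*
 * rotating a busy worker to the tail only on every fourth submission (via
 * worker->sequence) is a compromise: short bursts of work tend to land on
 * the same still-warm thread, while sustained load still gets spread
 * across the pool.
 */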

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
        spin_unlock_irqrestore(&workers->lock, flags);

        if (!worker) {
                spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        struct list_head *fallback = NULL;
                        /*
                         * we have failed to find any idle workers, just
                         * fall back to the first one we can find.
                         */
                        if (!list_empty(&workers->worker_list))
                                fallback = workers->worker_list.next;
                        if (!list_empty(&workers->idle_list))
                                fallback = workers->idle_list.next;
                        BUG_ON(!fallback);
                        worker = list_entry(fallback,
                                  struct btrfs_worker_thread, worker_list);
                        spin_unlock_irqrestore(&workers->lock, flags);
                } else {
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;

        if (test_and_set_bit(0, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}
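
/*
 * the expected pattern is for a long running work->func() to call
 * btrfs_requeue_work() on its own item and return; worker_loop will then
 * pick the item up again on a later pass through the pending list.
 */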

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(0, &work->flags))
                goto out;

        worker = find_worker(workers);

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        check_busy_worker(worker);
        list_add_tail(&work->list, &worker->pending);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        spin_unlock_irqrestore(&worker->lock, flags);

        if (wake)
                wake_up_process(worker->task);
out:
        return 0;
}
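
/*
 * Typical lifecycle, as a rough sketch only (real call sites elsewhere in
 * btrfs are authoritative; my_func and my_work are made-up names, and the
 * func field is assumed per the struct btrfs_work sketch near the top):
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		... do the deferred work, then free or recycle 'work' ...
 *	}
 *
 *	struct btrfs_workers pool;
 *	struct btrfs_work *my_work;
 *
 *	btrfs_init_workers(&pool, "example", 8);
 *	btrfs_start_workers(&pool, 1);
 *
 *	my_work = kzalloc(sizeof(*my_work), GFP_NOFS);
 *	my_work->func = my_func;
 *	btrfs_queue_worker(&pool, my_work);
 *
 *	... once all queued work has finished ...
 *	btrfs_stop_workers(&pool);
 */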