/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif

#include "async-thread.h"

/*
 * container for the kthread task pointer and the list of pending work.
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
        /* pool we belong to */
        struct btrfs_workers *workers;

        /* list of struct btrfs_work that are waiting for service */
        struct list_head pending;

        /* list of worker threads from struct btrfs_workers */
        struct list_head worker_list;

        /* kthread */
        struct task_struct *task;

        /* number of things on the pending list */
        atomic_t num_pending;

        /* protects the pending list */
        spinlock_t lock;

        /* set to non-zero when this thread is already awake and kicking */
        int working;

        /* are we currently idle */
        int idle;
};

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
        if (!worker->idle && atomic_read(&worker->num_pending) <
            worker->workers->idle_thresh / 2) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 1;
                list_move(&worker->worker_list, &worker->workers->idle_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
        if (worker->idle && atomic_read(&worker->num_pending) >=
            worker->workers->idle_thresh) {
                unsigned long flags;
                spin_lock_irqsave(&worker->workers->lock, flags);
                worker->idle = 0;
                list_move_tail(&worker->worker_list,
                               &worker->workers->worker_list);
                spin_unlock_irqrestore(&worker->workers->lock, flags);
        }
}
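
/*
 * Added note (not in the original file): with the default idle_thresh of 64
 * set in btrfs_init_workers(), a worker only moves back onto the idle list
 * once its pending count drops below 32, and only moves off it again once
 * the count reaches 64.  The gap between the two thresholds keeps a worker
 * from bouncing between the lists on every queue and dequeue.
 */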

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
        struct btrfs_worker_thread *worker = arg;
        struct list_head *cur;
        struct btrfs_work *work;
        do {
                spin_lock_irq(&worker->lock);
                while (!list_empty(&worker->pending)) {
                        cur = worker->pending.next;
                        work = list_entry(cur, struct btrfs_work, list);
                        list_del(&work->list);
                        clear_bit(0, &work->flags);

                        work->worker = worker;
                        spin_unlock_irq(&worker->lock);

                        work->func(work);

                        atomic_dec(&worker->num_pending);
                        spin_lock_irq(&worker->lock);
                        check_idle_worker(worker);
                }
                /* nothing left on the pending list, get ready to sleep */
                worker->working = 0;
                if (freezing(current)) {
                        /* drop the lock before sleeping in the refrigerator */
                        spin_unlock_irq(&worker->lock);
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&worker->lock);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
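
/*
 * Added note (not in the original file) on the sleep/wake handshake:
 * worker->working is cleared and TASK_INTERRUPTIBLE is set while
 * worker->lock is still held, and btrfs_queue_worker() only calls
 * wake_up_process() after adding the new item under that same lock, so a
 * submission that races with the emptiness check above cannot be lost
 * across schedule().
 */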

/*
 * this will wait for all the worker threads to shut down
 */
int btrfs_stop_workers(struct btrfs_workers *workers)
{
        struct list_head *cur;
        struct btrfs_worker_thread *worker;

        list_splice_init(&workers->idle_list, &workers->worker_list);
        while (!list_empty(&workers->worker_list)) {
                cur = workers->worker_list.next;
                worker = list_entry(cur, struct btrfs_worker_thread,
                                    worker_list);
                kthread_stop(worker->task);
                list_del(&worker->worker_list);
                kfree(worker);
        }
        return 0;
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, int max)
{
        workers->num_workers = 0;
        INIT_LIST_HEAD(&workers->worker_list);
        INIT_LIST_HEAD(&workers->idle_list);
        spin_lock_init(&workers->lock);
        workers->max_workers = max;
        workers->idle_thresh = 64;
}

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
        struct btrfs_worker_thread *worker;
        int ret = 0;
        int i;

        for (i = 0; i < num_workers; i++) {
                worker = kzalloc(sizeof(*worker), GFP_NOFS);
                if (!worker) {
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&worker->pending);
                INIT_LIST_HEAD(&worker->worker_list);
                spin_lock_init(&worker->lock);
                atomic_set(&worker->num_pending, 0);
                worker->task = kthread_run(worker_loop, worker, "btrfs");
                worker->workers = workers;
                if (IS_ERR(worker->task)) {
                        ret = PTR_ERR(worker->task);
                        goto fail;
                }

                spin_lock_irq(&workers->lock);
                list_add_tail(&worker->worker_list, &workers->idle_list);
                workers->num_workers++;
                spin_unlock_irq(&workers->lock);
        }
        return 0;
fail:
        btrfs_stop_workers(workers);
        return ret;
}
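
/*
 * Example of setting up a pool (illustrative sketch added here, not part of
 * the original file; the variable name and sizes are made up):
 *
 *      struct btrfs_workers example_workers;
 *      int ret;
 *
 *      btrfs_init_workers(&example_workers, 8);
 *      ret = btrfs_start_workers(&example_workers, 1);
 *      if (ret)
 *              return ret;
 *      ...queue work, see btrfs_queue_worker() below...
 *      btrfs_stop_workers(&example_workers);
 *
 * btrfs_queue_worker() spawns additional threads on demand, up to the max
 * of 8 passed to btrfs_init_workers() in this sketch.
 */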

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return NULL if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        struct list_head *next;
        int enforce_min = workers->num_workers < workers->max_workers;

        /*
         * if we find an idle thread, don't move it to the end of the
         * idle list.  This improves the chance that the next submission
         * will reuse the same thread, and maybe catch it while it is still
         * working
         */
        if (!list_empty(&workers->idle_list)) {
                next = workers->idle_list.next;
                worker = list_entry(next, struct btrfs_worker_thread,
                                    worker_list);
                return worker;
        }
        if (enforce_min || list_empty(&workers->worker_list))
                return NULL;

        /*
         * if we pick a busy task, move the task to the end of the list.
         * hopefully this will keep things somewhat evenly balanced
         */
        next = workers->worker_list.next;
        worker = list_entry(next, struct btrfs_worker_thread, worker_list);
        list_move_tail(next, &workers->worker_list);
        return worker;
}

static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;

again:
        spin_lock_irqsave(&workers->lock, flags);
        worker = next_worker(workers);
        spin_unlock_irqrestore(&workers->lock, flags);

        if (!worker) {
                spin_lock_irqsave(&workers->lock, flags);
                if (workers->num_workers >= workers->max_workers) {
                        struct list_head *fallback = NULL;
                        /*
                         * we have failed to find an idle worker, just
                         * pick the first one we can find and force the
                         * work onto it
                         */
                        if (!list_empty(&workers->worker_list))
                                fallback = workers->worker_list.next;
                        if (!list_empty(&workers->idle_list))
                                fallback = workers->idle_list.next;
                        BUG_ON(!fallback);
                        worker = list_entry(fallback,
                                    struct btrfs_worker_thread, worker_list);
                        spin_unlock_irqrestore(&workers->lock, flags);
                } else {
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
                        btrfs_start_workers(workers, 1);
                        goto again;
                }
        }
        return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
int btrfs_requeue_work(struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker = work->worker;
        unsigned long flags;

        if (test_and_set_bit(0, &work->flags))
                goto out;

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        list_add_tail(&work->list, &worker->pending);
        check_busy_worker(worker);
        spin_unlock_irqrestore(&worker->lock, flags);
out:
        return 0;
}
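
/*
 * Example of a long running work function that uses btrfs_requeue_work()
 * (illustrative sketch, not part of the original file; example_ctx and the
 * helper names are made up):
 *
 *      static void example_long_func(struct btrfs_work *work)
 *      {
 *              struct example_ctx *ctx = container_of(work,
 *                                              struct example_ctx, work);
 *
 *              if (!example_make_some_progress(ctx)) {
 *                      ...more to do: go back on the tail of the same
 *                         worker's pending list and let other items run...
 *                      btrfs_requeue_work(work);
 *                      return;
 *              }
 *              example_finish(ctx);
 *      }
 *
 * Requeueing is safe here because work->worker was set by worker_loop()
 * before the function was called.
 */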

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        int wake = 0;

        /* don't requeue something already on a list */
        if (test_and_set_bit(0, &work->flags))
                goto out;

        worker = find_worker(workers);

        spin_lock_irqsave(&worker->lock, flags);
        atomic_inc(&worker->num_pending);
        check_busy_worker(worker);
        list_add_tail(&work->list, &worker->pending);

        /*
         * avoid calling into wake_up_process if this thread has already
         * been kicked
         */
        if (!worker->working)
                wake = 1;
        worker->working = 1;

        spin_unlock_irqrestore(&worker->lock, flags);

        if (wake)
                wake_up_process(worker->task);
out:
        return 0;
}
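
/*
 * Example of submitting work to the pool (illustrative sketch, not part of
 * the original file; everything except struct btrfs_work and the functions
 * defined above is made up):
 *
 *      struct example_job {
 *              struct btrfs_work work;
 *              ...caller specific fields...
 *      };
 *
 *      static void example_func(struct btrfs_work *work)
 *      {
 *              struct example_job *job = container_of(work,
 *                                              struct example_job, work);
 *              ...do the actual work...
 *      }
 *
 *      job->work.func = example_func;
 *      job->work.flags = 0;
 *      btrfs_queue_worker(&example_workers, &job->work);
 *
 * Bit 0 of work->flags is used internally to detect double queueing, so a
 * freshly initialized work item must start with that bit clear.
 */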