// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
	WORK_HIGH_PRIO_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

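/*
 * Owner accessors, used e.g. by tracepoints and debug code to attribute a
 * workqueue or work item to the filesystem it belongs to.
 */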
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support "thresh == NO_THRESHOLD" case, but it requires
	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
	 * postpone it until someone needs the support of that case.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

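/*
 * Example (hedged sketch, in the style of an in-tree caller; the queue
 * name is illustrative): a producer can back off instead of piling more
 * items onto a congested queue:
 *
 *	if (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
 *		return;
 *	... otherwise queue more background work ...
 */

/*
 * Allocate the low-level wrapper around a kernel workqueue. A thresh of 0
 * selects DFT_THRESHOLD; anything below DFT_THRESHOLD disables
 * thresholding and pins max_active at limit_active from the start.
 */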
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
						 ret->current_active, name);
	else
		ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
						 ret->current_active, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

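/*
 * Allocate a btrfs workqueue.
 *
 * @fs_info:      owning filesystem
 * @name:         used in the underlying workqueue name as btrfs-@name
 * @flags:        WQ_* flags; WQ_HIGHPRI additionally allocates a
 *                high-priority sibling queue
 * @limit_active: upper bound on concurrently running work items
 * @thresh:       backlog threshold for growing concurrency; 0 picks
 *                DFT_THRESHOLD, anything below DFT_THRESHOLD disables
 *                thresholding
 *
 * Returns NULL on allocation failure.
 */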
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

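/*
 * Example (hedged sketch): mount-time setup in the style of the in-tree
 * callers; the names and parameters are illustrative, not prescriptive:
 *
 *	unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
 *
 *	fs_info->workers = btrfs_alloc_workqueue(fs_info, "worker",
 *						 flags | WQ_HIGHPRI,
 *						 max_active, 16);
 *	if (!fs_info->workers)
 *		return -ENOMEM;
 *
 * Passing WQ_HIGHPRI here allocates both the normal and the high-priority
 * underlying queues; btrfs_set_work_high_priority() then routes individual
 * work items to the high queue.
 */
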
/*
 * Hook for the threshold mechanism, called from btrfs_queue_work().
 * This hook WILL be called in IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called from here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook runs in kthread context, so it is safe to call
 * workqueue_set_max_active() from here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change concurrently, which is fine: the value does
	 * not need to be exact to compute new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
}

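/*
 * Worked example of the adjustment above (sketch): with thresh = 32 and
 * limit_active = 8, wq->count cycles modulo thresh / 4 = 8, skipping the
 * re-evaluation on each wrap. When it does run, pending > 32 steps
 * current_active up by one and pending < 16 steps it down by one, always
 * clamped to [1, 8].
 */

/*
 * Run the ordered completion hooks: walk the head of ordered_list and call
 * ordered_func, in queue order, for every item whose normal work has
 * finished (WORK_DONE_BIT set). An unfinished item acts as a barrier for
 * everything queued after it.
 */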
static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	/* Every break in the loop above leaves list_lock held. */
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}

static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

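/*
 * Example (hedged sketch, illustrative names): a typical submission path
 * embeds a struct btrfs_work in its own context, initializes it once, and
 * queues it:
 *
 *	struct my_async_ctx {
 *		struct btrfs_work work;
 *		... caller-specific payload ...
 *	};
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		struct my_async_ctx *ctx =
 *			container_of(work, struct my_async_ctx, work);
 *		... do the heavy lifting ...
 *	}
 *
 *	btrfs_init_work(&ctx->work, my_func, NULL, NULL);
 *	btrfs_queue_work(fs_info->workers, &ctx->work);
 *
 * Passing NULL for ordered_func/ordered_free opts out of the ordered
 * completion machinery; such items never touch ordered_list.
 */
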
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

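/*
 * Queue a work item, routing it to the high-priority underlying queue
 * when WORK_HIGH_PRIO_BIT is set and the queue was created with
 * WQ_HIGHPRI; otherwise it falls back to the normal queue.
 */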
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

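/*
 * Raise or lower the concurrency cap. Note that this only updates the
 * limit: for thresholded queues the effective max_active converges lazily
 * as thresh_exec_hook() re-clamps current_active, while queues created
 * with NO_THRESHOLD keep the max_active chosen at allocation time.
 */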
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (wq->high)
		flush_workqueue(wq->high->normal_wq);

	flush_workqueue(wq->normal->normal_wq);
}