Btrfs: cache extent states in defrag code path
[linux-block.git] fs/btrfs/async-thread.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;
	int max_active;
	int current_max;
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static inline struct __btrfs_workqueue
*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	return ret;
}
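
/*
 * Example (illustrative, not part of the original file): given a
 * hypothetical caller-supplied name "delalloc", the format strings above
 * yield a kernel workqueue named "btrfs-delalloc", or "btrfs-delalloc-high"
 * when WQ_HIGHPRI is set.
 */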

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
					      int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
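
/*
 * Usage sketch (illustrative, not part of the original file): allocating a
 * thresholded queue together with its high-priority counterpart.  The name
 * "worker" and the limits 8 and 64 are hypothetical values; WQ_HIGHPRI makes
 * the function allocate the ->high queue as well.
 *
 *	struct btrfs_workqueue *wq;
 *
 *	wq = btrfs_alloc_workqueue("worker", WQ_HIGHPRI, 8, 64);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	btrfs_destroy_workqueue(wq);
 */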

/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * can be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may change later, but that's OK since we don't need it
	 * to be accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
}
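
/*
 * Worked example (illustrative, not part of the original file): with
 * thresh = 32, wq->count wraps modulo thresh / 4 = 8.  On a pass where the
 * adjustment runs, pending > 32 grows new_max_active by one and pending < 16
 * shrinks it by one; the result is clamped to [1, max_active] and handed to
 * workqueue_set_max_active() outside thres_lock, and only if it changed.
 */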

static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with
		 * the lock held.
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}
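
/*
 * Example scenario (illustrative, not part of the original file): works A
 * and B are queued in that order and B's func() happens to finish first.
 * B sets WORK_DONE_BIT, but run_ordered_work() stops at A, the head of
 * ordered_list, whose WORK_DONE_BIT is still clear.  Only after A completes
 * does the list walk call ordered_func() for A and then B, preserving queue
 * order.
 */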

static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work *work;
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

void btrfs_init_work(struct btrfs_work *work,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, normal_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
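
/*
 * Usage sketch (illustrative, not part of the original file): the usual
 * pattern embeds struct btrfs_work in a caller-defined context and recovers
 * it with container_of() inside the work function.  The names
 * "struct my_async_ctx" and "my_func" are hypothetical.
 *
 *	struct my_async_ctx {
 *		struct btrfs_work work;
 *	};
 *
 *	static void my_func(struct btrfs_work *work)
 *	{
 *		struct my_async_ctx *ctx =
 *			container_of(work, struct my_async_ctx, work);
 *		...
 *	}
 *
 *	btrfs_init_work(&ctx->work, my_func, NULL, NULL);
 *	btrfs_queue_work(wq, &ctx->work);
 */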

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
	trace_btrfs_work_queued(work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
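
/*
 * End-to-end sketch (illustrative, not part of the original file), tying the
 * API together with hypothetical names and limits: a WQ_HIGHPRI queue pair,
 * one work item routed to the high-priority queue, then teardown.
 *
 *	struct btrfs_workqueue *wq;
 *
 *	wq = btrfs_alloc_workqueue("endio", WQ_HIGHPRI, 8, 64);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	btrfs_init_work(&ctx->work, my_func, NULL, NULL);
 *	btrfs_set_work_high_priority(&ctx->work);
 *	btrfs_queue_work(wq, &ctx->work);
 *
 *	btrfs_destroy_workqueue(wq);
 */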