eta: fix comparison reversal for time based jobs
[fio.git] / workqueue.c
/*
 * Generic workqueue offload mechanism
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <unistd.h>

#include "fio.h"
#include "flist.h"
#include "workqueue.h"

enum {
	SW_F_IDLE	= 1 << 0,
	SW_F_RUNNING	= 1 << 1,
	SW_F_EXIT	= 1 << 2,
	SW_F_EXITED	= 1 << 3,
	SW_F_ACCOUNTED	= 1 << 4,
	SW_F_ERROR	= 1 << 5,
};

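/*
 * Scan workers in [start, end] for one that is idle. While scanning,
 * remember the worker with the lowest submit sequence number as the
 * best fallback in case no idle worker is found.
 */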
static struct submit_worker *__get_submit_worker(struct workqueue *wq,
						 unsigned int start,
						 unsigned int end,
						 struct submit_worker **best)
{
	struct submit_worker *sw = NULL;

	while (start <= end) {
		sw = &wq->workers[start];
		if (sw->flags & SW_F_IDLE)
			return sw;
		if (!(*best) || sw->seq < (*best)->seq)
			*best = sw;
		start++;
	}

	return NULL;
}

static struct submit_worker *get_submit_worker(struct workqueue *wq)
{
	unsigned int next = wq->next_free_worker;
	struct submit_worker *sw, *best = NULL;

	assert(next < wq->max_workers);

	sw = __get_submit_worker(wq, next, wq->max_workers - 1, &best);
	if (!sw && next)
		sw = __get_submit_worker(wq, 0, next - 1, &best);

	/*
	 * No truly idle found, use best match
	 */
	if (!sw)
		sw = best;

	if (sw->index == wq->next_free_worker) {
		if (sw->index + 1 < wq->max_workers)
			wq->next_free_worker = sw->index + 1;
		else
			wq->next_free_worker = 0;
	}

	return sw;
}

static bool all_sw_idle(struct workqueue *wq)
{
	int i;

	for (i = 0; i < wq->max_workers; i++) {
		struct submit_worker *sw = &wq->workers[i];

		if (!(sw->flags & SW_F_IDLE))
			return false;
	}

	return true;
}

/*
 * Must be serialized wrt workqueue_enqueue() by caller
 */
void workqueue_flush(struct workqueue *wq)
{
	wq->wake_idle = 1;

	while (!all_sw_idle(wq)) {
		pthread_mutex_lock(&wq->flush_lock);
		pthread_cond_wait(&wq->flush_cond, &wq->flush_lock);
		pthread_mutex_unlock(&wq->flush_lock);
	}

	wq->wake_idle = 0;
}

/*
 * Must be serialized by caller.
 */
void workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work)
{
	struct submit_worker *sw;

	sw = get_submit_worker(wq);
	assert(sw);

	pthread_mutex_lock(&sw->lock);
	flist_add_tail(&work->list, &sw->work_list);
	sw->seq = ++wq->work_seq;
	sw->flags &= ~SW_F_IDLE;
	pthread_mutex_unlock(&sw->lock);

	pthread_cond_signal(&sw->cond);
}

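/*
 * Example usage, as a sketch only (the names below are hypothetical, and
 * the full set of workqueue_ops callbacks is declared in workqueue.h):
 * callers embed a struct workqueue_work in their own work item, recover
 * it in the ->fn() handler, and drive the queue with init/enqueue/flush/exit.
 *
 *	struct my_work {
 *		struct workqueue_work wq_work;
 *		int payload;
 *	};
 *
 *	// handler run by a worker thread; exact prototype per workqueue.h
 *	static void my_fn(struct submit_worker *sw, struct workqueue_work *work)
 *	{
 *		struct my_work *mw = container_of(work, struct my_work, wq_work);
 *		// process mw->payload
 *	}
 *
 *	struct workqueue_ops my_ops = { .fn = my_fn };
 *
 *	workqueue_init(td, &wq, &my_ops, depth);
 *	workqueue_enqueue(&wq, &mw->wq_work);
 *	workqueue_flush(&wq);
 *	workqueue_exit(&wq);
 */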
static void handle_list(struct submit_worker *sw, struct flist_head *list)
{
	struct workqueue *wq = sw->wq;
	struct workqueue_work *work;

	while (!flist_empty(list)) {
		work = flist_first_entry(list, struct workqueue_work, list);
		flist_del_init(&work->list);
		wq->ops.fn(sw, work);
	}
}

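/*
 * Per-worker thread. Each worker pulls entries off its private work_list,
 * splices them to a local list, and runs ops.fn() on each without holding
 * its lock. When the list goes empty the worker marks itself SW_F_IDLE,
 * signals flush_cond if a flush is waiting, and sleeps on its condition
 * variable until new work or SW_F_EXIT arrives.
 */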
static void *worker_thread(void *data)
{
	struct submit_worker *sw = data;
	struct workqueue *wq = sw->wq;
	unsigned int eflags = 0, ret = 0;
	FLIST_HEAD(local_list);

	if (wq->ops.nice) {
		if (nice(wq->ops.nice) < 0) {
			log_err("workqueue: nice %s\n", strerror(errno));
			ret = 1;
		}
	}

	if (!ret)
		ret = workqueue_init_worker(sw);

	pthread_mutex_lock(&sw->lock);
	sw->flags |= SW_F_RUNNING;
	if (ret)
		sw->flags |= SW_F_ERROR;
	pthread_mutex_unlock(&sw->lock);

	pthread_mutex_lock(&wq->flush_lock);
	pthread_cond_signal(&wq->flush_cond);
	pthread_mutex_unlock(&wq->flush_lock);

	if (sw->flags & SW_F_ERROR)
		goto done;

	while (1) {
		pthread_mutex_lock(&sw->lock);

		if (flist_empty(&sw->work_list)) {
			if (sw->flags & SW_F_EXIT) {
				pthread_mutex_unlock(&sw->lock);
				break;
			}

			if (workqueue_pre_sleep_check(sw)) {
				pthread_mutex_unlock(&sw->lock);
				workqueue_pre_sleep(sw);
				pthread_mutex_lock(&sw->lock);
			}

			/*
			 * We dropped and reacquired the lock, check
			 * state again.
			 */
			if (!flist_empty(&sw->work_list))
				goto handle_work;

			if (sw->flags & SW_F_EXIT) {
				pthread_mutex_unlock(&sw->lock);
				break;
			} else if (!(sw->flags & SW_F_IDLE)) {
				sw->flags |= SW_F_IDLE;
				wq->next_free_worker = sw->index;
				if (wq->wake_idle)
					pthread_cond_signal(&wq->flush_cond);
			}
			if (wq->ops.update_acct_fn)
				wq->ops.update_acct_fn(sw);

			pthread_cond_wait(&sw->cond, &sw->lock);
		} else {
handle_work:
			flist_splice_init(&sw->work_list, &local_list);
		}
		pthread_mutex_unlock(&sw->lock);
		handle_list(sw, &local_list);
	}

	if (wq->ops.update_acct_fn)
		wq->ops.update_acct_fn(sw);

done:
	pthread_mutex_lock(&sw->lock);
	sw->flags |= (SW_F_EXITED | eflags);
	pthread_mutex_unlock(&sw->lock);
	return NULL;
}

static void free_worker(struct submit_worker *sw, unsigned int *sum_cnt)
{
	struct workqueue *wq = sw->wq;

	workqueue_exit_worker(sw, sum_cnt);

	pthread_cond_destroy(&sw->cond);
	pthread_mutex_destroy(&sw->lock);

	if (wq->ops.free_worker_fn)
		wq->ops.free_worker_fn(sw);
}

static void shutdown_worker(struct submit_worker *sw, unsigned int *sum_cnt)
{
	pthread_join(sw->thread, NULL);
	free_worker(sw, sum_cnt);
}

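/*
 * Tear the workqueue down in two passes: first flag every worker with
 * SW_F_EXIT and wake it, then join and free each worker exactly once,
 * using SW_F_ACCOUNTED to avoid shutting the same worker down twice.
 */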
void workqueue_exit(struct workqueue *wq)
{
	unsigned int shutdown, sum_cnt = 0;
	struct submit_worker *sw;
	int i;

	if (!wq->workers)
		return;

	for (i = 0; i < wq->max_workers; i++) {
		sw = &wq->workers[i];

		pthread_mutex_lock(&sw->lock);
		sw->flags |= SW_F_EXIT;
		pthread_cond_signal(&sw->cond);
		pthread_mutex_unlock(&sw->lock);
	}

	do {
		shutdown = 0;
		for (i = 0; i < wq->max_workers; i++) {
			sw = &wq->workers[i];
			if (sw->flags & SW_F_ACCOUNTED)
				continue;
			pthread_mutex_lock(&sw->lock);
			sw->flags |= SW_F_ACCOUNTED;
			pthread_mutex_unlock(&sw->lock);
			shutdown_worker(sw, &sum_cnt);
			shutdown++;
		}
	} while (shutdown && shutdown != wq->max_workers);

	free(wq->workers);
	wq->workers = NULL;
	pthread_mutex_destroy(&wq->flush_lock);
	pthread_cond_destroy(&wq->flush_cond);
	pthread_mutex_destroy(&wq->stat_lock);
}

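/*
 * Set up one worker slot: init its list head, condition variable and lock,
 * run the optional alloc_worker_fn() hook, then spawn the worker thread.
 * If pthread_create() fails, the worker is freed again and 1 is returned.
 */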
static int start_worker(struct workqueue *wq, unsigned int index)
{
	struct submit_worker *sw = &wq->workers[index];
	int ret;

	INIT_FLIST_HEAD(&sw->work_list);
	pthread_cond_init(&sw->cond, NULL);
	pthread_mutex_init(&sw->lock, NULL);
	sw->wq = wq;
	sw->index = index;

	if (wq->ops.alloc_worker_fn) {
		ret = wq->ops.alloc_worker_fn(sw);
		if (ret)
			return ret;
	}

	ret = pthread_create(&sw->thread, NULL, worker_thread, sw);
	if (!ret) {
		pthread_mutex_lock(&sw->lock);
		sw->flags = SW_F_IDLE;
		pthread_mutex_unlock(&sw->lock);
		return 0;
	}

	free_worker(sw, NULL);
	return 1;
}

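/*
 * Create up to max_pending workers, trim max_workers down to the number
 * that actually started, then wait on flush_cond until every worker has
 * reported SW_F_RUNNING (or any worker reported SW_F_ERROR, which tears
 * the queue back down).
 */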
int workqueue_init(struct thread_data *td, struct workqueue *wq,
		   struct workqueue_ops *ops, unsigned max_pending)
{
	unsigned int running;
	int i, error;

	wq->max_workers = max_pending;
	wq->td = td;
	wq->ops = *ops;
	wq->work_seq = 0;
	wq->next_free_worker = 0;
	pthread_cond_init(&wq->flush_cond, NULL);
	pthread_mutex_init(&wq->flush_lock, NULL);
	pthread_mutex_init(&wq->stat_lock, NULL);

	wq->workers = calloc(wq->max_workers, sizeof(struct submit_worker));

	for (i = 0; i < wq->max_workers; i++)
		if (start_worker(wq, i))
			break;

	wq->max_workers = i;
	if (!wq->max_workers)
		goto err;

	/*
	 * Wait for them all to be started and initialized
	 */
	error = 0;
	do {
		struct submit_worker *sw;

		running = 0;
		pthread_mutex_lock(&wq->flush_lock);
		for (i = 0; i < wq->max_workers; i++) {
			sw = &wq->workers[i];
			pthread_mutex_lock(&sw->lock);
			if (sw->flags & SW_F_RUNNING)
				running++;
			if (sw->flags & SW_F_ERROR)
				error++;
			pthread_mutex_unlock(&sw->lock);
		}

		if (error || running == wq->max_workers) {
			pthread_mutex_unlock(&wq->flush_lock);
			break;
		}

		pthread_cond_wait(&wq->flush_cond, &wq->flush_lock);
		pthread_mutex_unlock(&wq->flush_lock);
	} while (1);

	if (!error)
		return 0;

err:
	log_err("Can't create rate workqueue\n");
	td_verror(td, ESRCH, "workqueue_init");
	workqueue_exit(wq);
	return 1;
}