/*
 * Generic workqueue offload mechanism
 *
 * Copyright (C) 2015 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <unistd.h>

#include "fio.h"
#include "flist.h"
#include "workqueue.h"
#include "smalloc.h"
#include "pshared.h"
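
/*
 * Per-worker state flags. IDLE marks a worker with no queued work that
 * may be handed new items; RUNNING and ERROR report startup status back
 * to workqueue_init(); EXIT requests shutdown; ACCOUNTED marks a worker
 * that workqueue_exit() has already joined and accounted.
 */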
enum {
	SW_F_IDLE	= 1 << 0,
	SW_F_RUNNING	= 1 << 1,
	SW_F_EXIT	= 1 << 2,
	SW_F_ACCOUNTED	= 1 << 3,
	SW_F_ERROR	= 1 << 4,
};
static struct submit_worker *__get_submit_worker(struct workqueue *wq,
						 unsigned int start,
						 unsigned int end,
						 struct submit_worker **best)
{
	struct submit_worker *sw = NULL;

	while (start <= end) {
		sw = &wq->workers[start];
		if (sw->flags & SW_F_IDLE)
			return sw;
		if (!(*best) || sw->seq < (*best)->seq)
			*best = sw;
		start++;
	}

	return NULL;
}
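
/*
 * Pick a worker for new work. Scan from next_free_worker to the end of
 * the array, then wrap around to the front, taking the first idle
 * worker found. If no worker is idle, fall back to the busy worker with
 * the lowest sequence number, i.e. the one least recently handed work.
 */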
static struct submit_worker *get_submit_worker(struct workqueue *wq)
{
	unsigned int next = wq->next_free_worker;
	struct submit_worker *sw, *best = NULL;

	assert(next < wq->max_workers);

	sw = __get_submit_worker(wq, next, wq->max_workers - 1, &best);
	if (!sw && next)
		sw = __get_submit_worker(wq, 0, next - 1, &best);

	/*
	 * No truly idle worker found, use the best match
	 */
	if (!sw)
		sw = best;

	if (sw->index == wq->next_free_worker) {
		if (sw->index + 1 < wq->max_workers)
			wq->next_free_worker = sw->index + 1;
		else
			wq->next_free_worker = 0;
	}

	return sw;
}
static bool all_sw_idle(struct workqueue *wq)
{
	int i;

	for (i = 0; i < wq->max_workers; i++) {
		struct submit_worker *sw = &wq->workers[i];

		if (!(sw->flags & SW_F_IDLE))
			return false;
	}

	return true;
}
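
/*
 * Workers signal wq->flush_cond whenever they transition to idle, so
 * the flush loop below only needs to re-check all_sw_idle() after each
 * wakeup instead of polling.
 */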
/*
 * Must be serialized wrt workqueue_enqueue() by caller
 */
void workqueue_flush(struct workqueue *wq)
{
	while (!all_sw_idle(wq)) {
		pthread_mutex_lock(&wq->flush_lock);
		pthread_cond_wait(&wq->flush_cond, &wq->flush_lock);
		pthread_mutex_unlock(&wq->flush_lock);
	}
}
/*
 * Must be serialized by caller.
 */
void workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work)
{
	struct submit_worker *sw;

	sw = get_submit_worker(wq);
	assert(sw);

	pthread_mutex_lock(&sw->lock);
	flist_add_tail(&work->list, &sw->work_list);
	sw->seq = ++wq->work_seq;
	sw->flags &= ~SW_F_IDLE;
	pthread_mutex_unlock(&sw->lock);

	pthread_cond_signal(&sw->cond);
}
static void handle_list(struct submit_worker *sw, struct flist_head *list)
{
	struct workqueue *wq = sw->wq;
	struct workqueue_work *work;

	while (!flist_empty(list)) {
		work = flist_first_entry(list, struct workqueue_work, list);
		flist_del_init(&work->list);
		wq->ops.fn(sw, work);
	}
}
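
/*
 * Worker main loop. Queued work is spliced to a local list and
 * processed with the worker lock dropped, so workqueue_enqueue() can
 * keep feeding the worker while it runs. When the list is empty the
 * worker marks itself idle, nominates itself as next_free_worker, and
 * sleeps on its condition variable until new work or an exit request
 * arrives.
 */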
static void *worker_thread(void *data)
{
	struct submit_worker *sw = data;
	struct workqueue *wq = sw->wq;
	unsigned int ret = 0;
	FLIST_HEAD(local_list);

	sk_out_assign(sw->sk_out);

	if (wq->ops.nice) {
		if (nice(wq->ops.nice) < 0) {
			log_err("workqueue: nice %s\n", strerror(errno));
			ret = 1;
		}
	}

	if (!ret)
		ret = workqueue_init_worker(sw);

	pthread_mutex_lock(&sw->lock);
	sw->flags |= SW_F_RUNNING;
	if (ret)
		sw->flags |= SW_F_ERROR;
	pthread_mutex_unlock(&sw->lock);

	pthread_mutex_lock(&wq->flush_lock);
	pthread_cond_signal(&wq->flush_cond);
	pthread_mutex_unlock(&wq->flush_lock);

	if (sw->flags & SW_F_ERROR)
		goto done;

	while (1) {
		pthread_mutex_lock(&sw->lock);

		if (flist_empty(&sw->work_list)) {
			if (sw->flags & SW_F_EXIT) {
				pthread_mutex_unlock(&sw->lock);
				break;
			}

			if (workqueue_pre_sleep_check(sw)) {
				pthread_mutex_unlock(&sw->lock);
				workqueue_pre_sleep(sw);
				pthread_mutex_lock(&sw->lock);
			}

			/*
			 * We dropped and reacquired the lock, check the
			 * state again.
			 */
			if (!flist_empty(&sw->work_list))
				goto handle_work;

			if (sw->flags & SW_F_EXIT) {
				pthread_mutex_unlock(&sw->lock);
				break;
			} else if (!(sw->flags & SW_F_IDLE)) {
				sw->flags |= SW_F_IDLE;
				wq->next_free_worker = sw->index;
				pthread_cond_signal(&wq->flush_cond);
			}

			if (wq->ops.update_acct_fn)
				wq->ops.update_acct_fn(sw);

			pthread_cond_wait(&sw->cond, &sw->lock);
		} else {
handle_work:
			flist_splice_init(&sw->work_list, &local_list);
		}
		pthread_mutex_unlock(&sw->lock);
		handle_list(sw, &local_list);
	}

	if (wq->ops.update_acct_fn)
		wq->ops.update_acct_fn(sw);

done:
	sk_out_drop();
	return NULL;
}
static void free_worker(struct submit_worker *sw, unsigned int *sum_cnt)
{
	struct workqueue *wq = sw->wq;

	workqueue_exit_worker(sw, sum_cnt);

	pthread_cond_destroy(&sw->cond);
	pthread_mutex_destroy(&sw->lock);

	if (wq->ops.free_worker_fn)
		wq->ops.free_worker_fn(sw);
}
static void shutdown_worker(struct submit_worker *sw, unsigned int *sum_cnt)
{
	pthread_join(sw->thread, NULL);
	free_worker(sw, sum_cnt);
}
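
/*
 * Shutdown happens in two phases: every worker is flagged SW_F_EXIT and
 * woken first, then each is joined. SW_F_ACCOUNTED ensures a worker is
 * only joined and accounted once across loop passes.
 */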
void workqueue_exit(struct workqueue *wq)
{
	unsigned int shutdown, sum_cnt = 0;
	struct submit_worker *sw;
	int i;

	if (!wq->workers)
		return;

	for (i = 0; i < wq->max_workers; i++) {
		sw = &wq->workers[i];

		pthread_mutex_lock(&sw->lock);
		sw->flags |= SW_F_EXIT;
		pthread_cond_signal(&sw->cond);
		pthread_mutex_unlock(&sw->lock);
	}

	do {
		shutdown = 0;
		for (i = 0; i < wq->max_workers; i++) {
			sw = &wq->workers[i];
			if (sw->flags & SW_F_ACCOUNTED)
				continue;
			pthread_mutex_lock(&sw->lock);
			sw->flags |= SW_F_ACCOUNTED;
			pthread_mutex_unlock(&sw->lock);
			shutdown_worker(sw, &sum_cnt);
			shutdown++;
		}
	} while (shutdown && shutdown != wq->max_workers);

	sfree(wq->workers);
	wq->workers = NULL;

	pthread_mutex_destroy(&wq->flush_lock);
	pthread_cond_destroy(&wq->flush_cond);
	pthread_mutex_destroy(&wq->stat_lock);
}
static int start_worker(struct workqueue *wq, unsigned int index,
			struct sk_out *sk_out)
{
	struct submit_worker *sw = &wq->workers[index];
	int ret;

	INIT_FLIST_HEAD(&sw->work_list);

	ret = mutex_cond_init_pshared(&sw->lock, &sw->cond);
	if (ret)
		return ret;

	sw->wq = wq;
	sw->index = index;
	sw->sk_out = sk_out;

	if (wq->ops.alloc_worker_fn) {
		ret = wq->ops.alloc_worker_fn(sw);
		if (ret)
			return ret;
	}

	ret = pthread_create(&sw->thread, NULL, worker_thread, sw);
	if (!ret) {
		pthread_mutex_lock(&sw->lock);
		sw->flags = SW_F_IDLE;
		pthread_mutex_unlock(&sw->lock);
		return 0;
	}

	free_worker(sw, NULL);
	return 1;
}
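
/*
 * Bring up a pool of max_workers threads. Each worker sets SW_F_RUNNING
 * (and SW_F_ERROR on failure) and signals flush_cond; the wait loop
 * below holds flush_lock until every started worker has checked in or
 * one of them reports an error.
 */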
int workqueue_init(struct thread_data *td, struct workqueue *wq,
		   struct workqueue_ops *ops, unsigned int max_workers,
		   struct sk_out *sk_out)
{
	unsigned int running;
	int i, error, ret;

	wq->max_workers = max_workers;
	wq->td = td;
	wq->ops = *ops;
	wq->work_seq = 0;
	wq->next_free_worker = 0;

	ret = mutex_cond_init_pshared(&wq->flush_lock, &wq->flush_cond);
	if (ret)
		goto err;

	ret = mutex_init_pshared(&wq->stat_lock);
	if (ret)
		goto err;

	wq->workers = smalloc(wq->max_workers * sizeof(struct submit_worker));
	if (!wq->workers)
		goto err;

	for (i = 0; i < wq->max_workers; i++)
		if (start_worker(wq, i, sk_out))
			break;

	wq->max_workers = i;
	if (!wq->max_workers)
		goto err;

	/*
	 * Wait for them all to be started and initialized
	 */
	error = 0;
	do {
		struct submit_worker *sw;

		running = 0;
		pthread_mutex_lock(&wq->flush_lock);
		for (i = 0; i < wq->max_workers; i++) {
			sw = &wq->workers[i];
			pthread_mutex_lock(&sw->lock);
			if (sw->flags & SW_F_RUNNING)
				running++;
			if (sw->flags & SW_F_ERROR)
				error++;
			pthread_mutex_unlock(&sw->lock);
		}

		if (error || running == wq->max_workers) {
			pthread_mutex_unlock(&wq->flush_lock);
			break;
		}

		pthread_cond_wait(&wq->flush_cond, &wq->flush_lock);
		pthread_mutex_unlock(&wq->flush_lock);
	} while (1);

	if (!error)
		return 0;

err:
	log_err("Can't create rate workqueue\n");
	td_verror(td, ESRCH, "workqueue_init");
	workqueue_exit(wq);
	return 1;
}
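
/*
 * Illustrative usage sketch, compiled out. The callback names (my_fn,
 * example) and the exact layout of struct workqueue_ops are assumptions
 * made for the example; the real definitions live in workqueue.h. Only
 * the call sequence shown here is taken from this file.
 */
#if 0
/* hypothetical work handler, runs on a worker thread */
static void my_fn(struct submit_worker *sw, struct workqueue_work *work)
{
	/* process one dequeued work item */
}

static void example(struct thread_data *td, struct sk_out *sk_out)
{
	struct workqueue wq = { 0 };
	struct workqueue_ops ops = { .fn = my_fn, };
	struct workqueue_work work;

	if (workqueue_init(td, &wq, &ops, 4, sk_out))
		return;

	workqueue_enqueue(&wq, &work);	/* caller serializes enqueues */
	workqueue_flush(&wq);		/* wait for all workers to idle */
	workqueue_exit(&wq);		/* flag exit, join, tear down */
}
#endif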