if (td->error)
break;
- ret = workqueue_enqueue(&td->io_wq, &io_u->work);
- if (ret)
- ret = FIO_Q_QUEUED;
- else
- ret = FIO_Q_BUSY;
+ workqueue_enqueue(&td->io_wq, &io_u->work);
+ ret = FIO_Q_QUEUED;
- if (ret == FIO_Q_QUEUED && ddir_rw(ddir)) {
+ if (ddir_rw(ddir)) {
td->io_issues[ddir]++;
td->io_issue_bytes[ddir] += blen;
td->rate_io_issue_bytes[ddir] += blen;
/*
- * Must be serialized by caller. Returns true for queued, false for busy.
+ * Must be serialized by caller.
 */
-bool workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work)
+void workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work)
{
struct submit_worker *sw;
sw = get_submit_worker(wq);
- if (sw) {
- pthread_mutex_lock(&sw->lock);
- flist_add_tail(&work->list, &sw->work_list);
- sw->seq = ++wq->work_seq;
- sw->flags &= ~SW_F_IDLE;
- pthread_mutex_unlock(&sw->lock);
+ assert(sw);
- pthread_cond_signal(&sw->cond);
- return true;
- }
+ pthread_mutex_lock(&sw->lock);
+ flist_add_tail(&work->list, &sw->work_list);
+ sw->seq = ++wq->work_seq;
+ sw->flags &= ~SW_F_IDLE;
+ pthread_mutex_unlock(&sw->lock);
- return false;
+ pthread_cond_signal(&sw->cond);
}
static void handle_list(struct submit_worker *sw, struct flist_head *list)
int workqueue_init(struct thread_data *td, struct workqueue *wq, struct workqueue_ops *ops, unsigned int max_workers);
void workqueue_exit(struct workqueue *wq);
-bool workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work);
+void workqueue_enqueue(struct workqueue *wq, struct workqueue_work *work);
void workqueue_flush(struct workqueue *wq);
static inline bool workqueue_pre_sleep_check(struct submit_worker *sw)