log_io_piece(td, io_u);
if (td->o.io_submit_mode == IO_MODE_OFFLOAD) {
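+ /*
+ * Snapshot length and direction now; once the io_u is handed to a
+ * worker it may complete and be recycled at any time.
+ */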
+ const unsigned long blen = io_u->xfer_buflen;
+ const enum fio_ddir ddir = acct_ddir(io_u);
+
if (td->error)
break;
- ret = workqueue_enqueue(&td->io_wq, io_u);
+
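+ /*
+ * workqueue_enqueue() hands the work item to a submit worker and
+ * reports success; map that back onto the usual queue status codes.
+ */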
+ ret = workqueue_enqueue(&td->io_wq, &io_u->work);
+ if (ret)
+ ret = FIO_Q_QUEUED;
+ else
+ ret = FIO_Q_BUSY;
+
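+ /* Account the issued I/O against the submitting thread */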
+ if (ret == FIO_Q_QUEUED && ddir_rw(ddir)) {
+ td->io_issues[ddir]++;
+ td->io_issue_bytes[ddir] += blen;
+ td->rate_io_issue_bytes[ddir] += blen;
+ }
if (should_check_rate(td))
td->rate_next_io_time[ddir] = usec_for_io(td, ddir);
return td->bytes_done[DDIR_WRITE] + td->bytes_done[DDIR_TRIM];
}
-static void io_workqueue_fn(struct thread_data *td, struct io_u *io_u)
+static void io_workqueue_fn(struct submit_worker *sw,
+ struct workqueue_work *work)
{
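+ /* The workqueue_work is embedded in the io_u; recover the io_u */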
+ struct io_u *io_u = container_of(work, struct io_u, work);
const enum fio_ddir ddir = io_u->ddir;
+ struct thread_data *td = sw->private;
int ret;
dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());
td->cur_depth++;
- ret = td_io_queue(td, io_u);
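+ /*
+ * Queue the io_u; if the engine returns busy, reap a completion
+ * and retry the same unit until it is accepted.
+ */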
+ do {
+ ret = td_io_queue(td, io_u);
+ if (ret != FIO_Q_BUSY)
+ break;
+ ret = io_u_queued_complete(td, 1);
+ if (ret > 0)
+ td->cur_depth -= ret;
+ io_u_clear(io_u, IO_U_F_FLIGHT);
+ } while (1);
dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());
io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);
- if (ret == FIO_Q_QUEUED)
- ret = io_u_queued_complete(td, 1);
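+ /*
+ * Completed inline: just drop the depth. Queued: reap whatever has
+ * completed, waiting for one event if iodepth=1. Busy: drain all
+ * I/O currently in flight.
+ */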
+ if (ret == FIO_Q_COMPLETED)
+ td->cur_depth--;
+ else if (ret == FIO_Q_QUEUED) {
+ unsigned int min_evts;
- td->cur_depth--;
+ if (td->o.iodepth == 1)
+ min_evts = 1;
+ else
+ min_evts = 0;
+
+ ret = io_u_queued_complete(td, min_evts);
+ if (ret > 0)
+ td->cur_depth -= ret;
+ } else if (ret == FIO_Q_BUSY) {
+ ret = io_u_queued_complete(td, td->cur_depth);
+ if (ret > 0)
+ td->cur_depth -= ret;
+ }
}
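+
+/*
+ * Checked before a worker goes idle: true while the worker still
+ * has queued or in-flight I/O that needs flushing.
+ */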
+static bool io_workqueue_pre_sleep_flush_fn(struct submit_worker *sw)
+{
+ struct thread_data *td = sw->private;
+
+ if (td->io_u_queued || td->cur_depth || td->io_u_in_flight)
+ return true;
+
+ return false;
+}
+
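+/*
+ * Flush prior to sleeping: io_u_quiesce() waits out the in-flight
+ * I/O and returns how many completions it reaped.
+ */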
+static void io_workqueue_pre_sleep_fn(struct submit_worker *sw)
+{
+ struct thread_data *td = sw->private;
+ int ret;
+
+ ret = io_u_quiesce(td);
+ if (ret > 0)
+ td->cur_depth -= ret;
+}
+
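+/*
+ * Each submit worker carries its own private thread_data, hung off
+ * sw->private.
+ */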
+static int io_workqueue_alloc_fn(struct submit_worker *sw)
+{
+ struct thread_data *td;
+
+ td = calloc(1, sizeof(*td));
+ sw->private = td;
+ return 0;
+}
+
+static void io_workqueue_free_fn(struct submit_worker *sw)
+{
+ free(sw->private);
+ sw->private = NULL;
+}
+
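+/*
+ * Ops handed to workqueue_init() when submission is offloaded
+ * (io_submit_mode=offload).
+ */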
+struct workqueue_ops rated_wq_ops = {
+ .fn = io_workqueue_fn,
+ .pre_sleep_flush_fn = io_workqueue_pre_sleep_flush_fn,
+ .pre_sleep_fn = io_workqueue_pre_sleep_fn,
+ .alloc_worker_fn = io_workqueue_alloc_fn,
+ .free_worker_fn = io_workqueue_free_fn,
+};
+
/*
* Entry point for the thread based jobs. The process based jobs end up
* here as well, after a little setup.
*/
fio_verify_init(td);
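+ /* Offloaded submission drives the workqueue with the ops above */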
if ((o->io_submit_mode == IO_MODE_OFFLOAD) &&
- workqueue_init(td, &td->io_wq, io_workqueue_fn, td->o.iodepth))
+ workqueue_init(td, &td->io_wq, &rated_wq_ops, td->o.iodepth))
goto err;
fio_gettime(&td->epoch, NULL);