+ ret = td->io_ops->queue(td, io_u);
+
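+ /*
+ * The io_u has been handed to the engine, so the per-file lock can
+ * be released again.
+ */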
+ unlock_file(td, io_u->file);
+
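+ /*
+ * The engine could not accept the io_u (queue full), so undo the
+ * issue accounting done before ->queue() and clear the in-flight
+ * flag; the io_u will be retried later.
+ */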
+ if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
+ td->io_issues[ddir]--;
+ td->io_issue_bytes[ddir] -= buflen;
+ td->rate_io_issue_bytes[ddir] -= buflen;
+ io_u_clear(td, io_u, IO_U_F_FLIGHT);
+ }
+
+ /*
+ * If an error was seen and the io engine didn't propagate it
+ * back to 'td', do so.
+ */
+ if (io_u->error && !td->error)
+ td_verror(td, io_u->error, "td_io_queue");
+
+ /*
+ * Add a warning for O_DIRECT so that users have an easier time
+ * spotting potentially bad alignment. If this triggers on the very
+ * first IO, it is most likely an alignment problem, or the host
+ * file system does not support O_DIRECT.
+ */
+ if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
+ td->o.odirect) {
+
+ log_info("fio: first direct IO errored. File system may not "
+ "support direct IO, or iomem_align= is bad. Try "
+ "setting direct=0.\n");
+ }
+
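+ /*
+ * For engines without a ->commit hook, and for trims, the io_u is
+ * effectively submitted at queue time, so mark the submission and
+ * completion right away.
+ */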
+ if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
+ io_u_mark_submit(td, 1);
+ io_u_mark_complete(td, 1);
+ }
+
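+ /*
+ * Update depth and per-ddir statistics based on how the engine
+ * handled the io_u: completed inline, or queued for later
+ * completion. Queued io_us are committed once iodepth_batch
+ * requests have built up.
+ */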
+ if (ret == FIO_Q_COMPLETED) {
+ if (ddir_rw(io_u->ddir)) {
+ io_u_mark_depth(td, 1);
+ td->ts.total_io_u[io_u->ddir]++;
+ }
+ } else if (ret == FIO_Q_QUEUED) {
+ int r;
+
+ td->io_u_queued++;
+
+ if (ddir_rw(io_u->ddir))
+ td->ts.total_io_u[io_u->ddir]++;
+
+ if (td->io_u_queued >= td->o.iodepth_batch) {
+ r = td_io_commit(td);
+ if (r < 0)
+ return r;
+ }
+ }
+
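+ /*
+ * For async engines, record the issue time now that the io_u has
+ * actually been handed off to the engine.
+ */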
+ if (!td_ioengine_flagged(td, FIO_SYNCIO)) {
+ if (fio_fill_issue_time(td))
+ fio_gettime(&io_u->issue_time, NULL);
+
+ /*
+ * only used for iolog
+ */
+ if (td->o.read_iolog_file)
+ memcpy(&td->last_issue, &io_u->issue_time,
+ sizeof(struct timeval));
+ }
+
+ return ret;
+}
+
+int td_io_init(struct thread_data *td)
+{
+ int ret = 0;
+
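+ /*
+ * Run the engine's optional init hook, logging any failure and
+ * propagating it into td->error if no earlier error is pending.
+ */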
+ if (td->io_ops->init) {
+ ret = td->io_ops->init(td);
+ if (ret)
+ log_err("fio: io engine %s init failed.%s\n",
+ td->io_ops->name,
+ td->o.iodepth > 1 ?
+ " Perhaps try reducing io depth?" : "");
+ else
+ td->io_ops_init = 1;
+ if (!td->error)
+ td->error = ret;
+ }
+
+ return ret;
+}
+
+int td_io_commit(struct thread_data *td)
+{
+ int ret;
+
+ dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);
+
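+ /*
+ * Nothing has been queued up, so there is nothing to commit.
+ */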
+ if (!td->cur_depth || !td->io_u_queued)
+ return 0;
+
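+ /*
+ * Record the depth distribution for the io_us about to be committed.
+ */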
+ io_u_mark_depth(td, td->io_u_queued);
+
+ if (td->io_ops->commit) {
+ ret = td->io_ops->commit(td);
+ if (ret)
+ td_verror(td, -ret, "io commit");