+	dprint_io_u(io_u, "prep");
+	fio_ro_check(td, io_u);
+
+	lock_file(td, io_u->file, io_u->ddir);
+
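+	/*
+	 * Hand the io_u to the engine's ->prep() hook, if it has one.
+	 * On failure the io_u never reaches td_io_queue(), so the file
+	 * lock must be dropped here instead of there.
+	 */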
+	if (td->io_ops->prep) {
+		int ret = td->io_ops->prep(td, io_u);
+
+		dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
+		if (ret)
+			unlock_file(td, io_u->file);
+		return ret;
+	}
+
+	return 0;
+}
+
+int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
+		    const struct timespec *t)
+{
+	int r = 0;
+
+	/*
+	 * For the rdma ioengine's one-sided RDMA_WRITE or RDMA_READ
+	 * operations, the server side gets a message from the client
+	 * side that the task is finished, and td->done is set to 1
+	 * after td_io_commit(). In that case there is no need to reap
+	 * completion events on the server side.
+	 */
+	if (td->done)
+		return 0;
+
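+	/*
+	 * If the caller insists on reaping at least one event, flush
+	 * anything still sitting in the queue first; otherwise we could
+	 * end up waiting for completions that were never submitted.
+	 */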
+	if (min > 0 && td->io_ops->commit) {
+		r = td->io_ops->commit(td);
+		if (r < 0)
+			goto out;
+	}
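+	/*
+	 * Clamp max to the current queue depth, and make sure the
+	 * engine is never asked for a min that exceeds max.
+	 */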
+	if (max > td->cur_depth)
+		max = td->cur_depth;
+	if (min > max)
+		max = min;
+
+	r = 0;
+	if (max && td->io_ops->getevents)
+		r = td->io_ops->getevents(td, min, max, t);
+out:
+	if (r >= 0) {
+		/*
+		 * Reflect that our submitted requests were retrieved with
+		 * whatever OS async calls are in the underlying engine.
+		 */
+		td->io_u_in_flight -= r;
+		io_u_mark_complete(td, r);
+	} else
+		td_verror(td, r, "get_events");
+
+	dprint(FD_IO, "getevents: %d\n", r);
+	return r;
+}
+
+int td_io_queue(struct thread_data *td, struct io_u *io_u)
+{
+	const enum fio_ddir ddir = acct_ddir(io_u);
+	unsigned long buflen = io_u->xfer_buflen;
+	int ret;
+
+	dprint_io_u(io_u, "queue");
+	fio_ro_check(td, io_u);
+
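+	/*
+	 * Mark the io_u as in flight; queueing the same io_u twice
+	 * without completing it first is a bug.
+	 */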
+	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
+	io_u_set(td, io_u, IO_U_F_FLIGHT);
+
+	assert(fio_file_open(io_u->file));
+
+	/*
+	 * If using a write iolog, store this entry.
+	 */
+	log_io_u(td, io_u);
+
+	io_u->error = 0;
+	io_u->resid = 0;
+
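+	/*
+	 * Sync engines complete the IO inside ->queue(), so the issue
+	 * time must be recorded before the call rather than after it.
+	 */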
+	if (td_ioengine_flagged(td, FIO_SYNCIO)) {
+		if (fio_fill_issue_time(td))
+			fio_gettime(&io_u->issue_time, NULL);
+
+		/*
+		 * only used for iolog
+		 */
+		if (td->o.read_iolog_file)
+			memcpy(&td->last_issue, &io_u->issue_time,
+			       sizeof(io_u->issue_time));
+	}
+
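+	/*
+	 * Charge the issue accounting up front; it is rolled back below
+	 * if the engine returns FIO_Q_BUSY.
+	 */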
+	if (ddir_rw(ddir)) {
+		td->io_issues[ddir]++;
+		td->io_issue_bytes[ddir] += buflen;
+		td->rate_io_issue_bytes[ddir] += buflen;
+	}
+
+	ret = td->io_ops->queue(td, io_u);
+
+	unlock_file(td, io_u->file);
+
+	if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
+		td->io_issues[ddir]--;
+		td->io_issue_bytes[ddir] -= buflen;
+		td->rate_io_issue_bytes[ddir] -= buflen;
+		io_u_clear(td, io_u, IO_U_F_FLIGHT);
+	}
+
+	/*
+	 * If an error was seen and the io engine didn't propagate it
+	 * back to 'td', do so.
+	 */
+	if (io_u->error && !td->error)
+		td_verror(td, io_u->error, "td_io_queue");
+
+	/*
+	 * Add a warning for O_DIRECT so that users have an easier time
+	 * spotting potentially bad alignment. If this triggers for the
+	 * first IO, then it's likely an alignment problem or because
+	 * the host fs does not support O_DIRECT.
+	 */
+	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
+	    td->o.odirect) {
+
+		log_info("fio: first direct IO errored. File system may not "
+			 "support direct IO, or iomem_align= is bad. Try "
+			 "setting direct=0.\n");
+	}
+
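+	/*
+	 * Engines without a ->commit() hook submit directly from
+	 * ->queue(), so mark submission and completion here; trims are
+	 * accounted the same way.
+	 */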
+	if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
+		io_u_mark_submit(td, 1);
+		io_u_mark_complete(td, 1);
+	}
+
+	if (ret == FIO_Q_COMPLETED) {
+		if (ddir_rw(io_u->ddir)) {
+			io_u_mark_depth(td, 1);
+			td->ts.total_io_u[io_u->ddir]++;
+		}
+	} else if (ret == FIO_Q_QUEUED) {
+		int r;
+
+		td->io_u_queued++;
+
+		if (ddir_rw(io_u->ddir))
+			td->ts.total_io_u[io_u->ddir]++;
+
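+		/*
+		 * Once enough io_us have queued up to fill a batch, kick
+		 * the engine with a commit (see iodepth_batch).
+		 */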
+		if (td->io_u_queued >= td->o.iodepth_batch) {
+			r = td_io_commit(td);
+			if (r < 0)
+				return r;
+		}
+	}
+
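+	/*
+	 * For async engines the issue time is taken after ->queue()
+	 * returns, the counterpart of the sync-engine case above.
+	 */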
+	if (!td_ioengine_flagged(td, FIO_SYNCIO)) {
+		if (fio_fill_issue_time(td))
+			fio_gettime(&io_u->issue_time, NULL);
+
+		/*
+		 * only used for iolog
+		 */
+		if (td->o.read_iolog_file)
+			memcpy(&td->last_issue, &io_u->issue_time,
+			       sizeof(io_u->issue_time));
+	}
+
+	return ret;
+}
+
+int td_io_init(struct thread_data *td)
+{
+	int ret = 0;
+
+	if (td->io_ops->init) {
+		ret = td->io_ops->init(td);
+		if (ret)
+			log_err("fio: io engine %s init failed.%s\n",
+				td->io_ops->name,
+				td->o.iodepth > 1 ?
+				" Perhaps try reducing io depth?" : "");
+		else
+			td->io_ops_init = 1;
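+		/*
+		 * Propagate the init result unless an earlier error is
+		 * already pending.
+		 */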
+		if (!td->error)
+			td->error = ret;
+	}
+
+	return ret;
+}
+
+int td_io_commit(struct thread_data *td)
+{
+	int ret;
+
+	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);
+
+	if (!td->cur_depth || !td->io_u_queued)
+		return 0;
+
+	io_u_mark_depth(td, td->io_u_queued);
+
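+	/*
+	 * ->commit() returns a negative errno on failure, hence the
+	 * sign flip for td_verror().
+	 */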
+	if (td->io_ops->commit) {
+		ret = td->io_ops->commit(td);
+		if (ret)
+			td_verror(td, -ret, "io commit");
+	}
+
+	/*
+	 * Reflect that events were submitted as async IO requests.
+	 */
+	td->io_u_in_flight += td->io_u_queued;
+	td->io_u_queued = 0;