+
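+/*
+ * Hand an io_u to the I/O engine. The return value is the engine's
+ * FIO_Q_* queue status (e.g. FIO_Q_QUEUED or FIO_Q_COMPLETED).
+ */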
+int td_io_queue(struct thread_data *td, struct io_u *io_u)
+{
+	int ret;
+
+	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
+	io_u->flags |= IO_U_F_FLIGHT;
+
+	assert(io_u->file->flags & FIO_FILE_OPEN);
+
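+	/*
+	 * reset error and residual byte count before issuing
+	 */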
+	io_u->error = 0;
+	io_u->resid = 0;
+
+	if (td->io_ops->flags & FIO_SYNCIO) {
+		fio_gettime(&io_u->issue_time, NULL);
+
+		/*
+		 * for a sync engine, set the timeout upfront
+		 */
+		if (mtime_since(&td->timeout_end, &io_u->issue_time) < IO_U_TIMEOUT)
+			io_u_set_timeout(td);
+	}
+
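+	/*
+	 * only reads and writes count towards the issue stats, not syncs
+	 */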
+	if (io_u->ddir != DDIR_SYNC)
+		td->io_issues[io_u->ddir]++;
+
+	io_u_mark_depth(td, io_u);
+
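+	/*
+	 * hand the io_u off to the engine's queue hook; a sync engine
+	 * completes it here, an async engine typically returns FIO_Q_QUEUED
+	 */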
+	ret = td->io_ops->queue(td, io_u);
+
+	if (ret == FIO_Q_QUEUED || ret == FIO_Q_COMPLETED)
+		get_file(io_u->file);
+
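+	/*
+	 * for queued io, commit the pending batch to the engine once we
+	 * exceed the configured batch depth
+	 */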
+	if (ret == FIO_Q_QUEUED) {
+		int r;
+
+		td->io_u_queued++;
+		if (td->io_u_queued > td->iodepth_batch) {
+			r = td_io_commit(td);
+			if (r < 0)
+				return r;
+		}
+	}
+
+	if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
+		fio_gettime(&io_u->issue_time, NULL);
+
+		/*
+		 * async engine, set the timeout here
+		 */
+		if (ret == FIO_Q_QUEUED &&
+		    mtime_since(&td->timeout_end, &io_u->issue_time) < IO_U_TIMEOUT)
+			io_u_set_timeout(td);
+	}
+
+	return ret;
+}
+
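+/*
+ * Call the engine's per-thread init hook, if it defines one.
+ */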
+int td_io_init(struct thread_data *td)
+{
+	if (td->io_ops->init)
+		return td->io_ops->init(td);
+
+	return 0;
+}
+
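+/*
+ * Flush queued but not yet submitted io to the engine, via its
+ * commit hook if it has one.
+ */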
+int td_io_commit(struct thread_data *td)
+{
+	if (!td->cur_depth)
+		return 0;
+
+	td->io_u_queued = 0;
+	if (td->io_ops->commit)
+		return td->io_ops->commit(td);
+
+	return 0;
+}
+
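+/*
+ * Open a file through the engine and reset the per-file tracking
+ * state; returns 0 on success, 1 on error.
+ */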
+int td_io_open_file(struct thread_data *td, struct fio_file *f)
+{
+	if (td->io_ops->open_file(td, f))
+		return 1;
+
+	f->last_free_lookup = 0;
+	f->last_completed_pos = 0;
+	f->last_pos = 0;
+	f->flags |= FIO_FILE_OPEN;
+	f->flags &= ~FIO_FILE_CLOSING;
+
+	if (f->file_map)
+		memset(f->file_map, 0, f->num_maps * sizeof(long));
+
+	td->nr_open_files++;
+	get_file(f);
+	return 0;
+}
+
+void td_io_close_file(struct thread_data *td, struct fio_file *f)
+{
+	/*
+	 * mark as closing, do the real close when the last io on it
+	 * has completed
+	 */
+	f->flags |= FIO_FILE_CLOSING;
+
+	put_file(td, f);
+}
+