return 1;
}
- /*
- * cpu thread doesn't need to provide anything
- */
- if (ops->flags & FIO_CPUIO)
- return 0;
-
if (!ops->queue) {
log_err("%s: no queue handler\n", ops->name);
return 1;
{
struct ioengine_ops *ops;
struct list_head *entry;
- char engine[16];
-
- strncpy(engine, name, sizeof(engine) - 1);
-
- if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
- strcpy(engine, "libaio");
list_for_each(entry, &engine_list) {
ops = list_entry(entry, struct ioengine_ops, list);
- if (!strcmp(engine, ops->name))
+ if (!strcmp(name, ops->name))
return ops;
}
if (td->io_ops->dlhandle)
dlclose(td->io_ops->dlhandle);
-#if 0
- /* we can't do this for threads, so just leak it, it's exiting */
free(td->io_ops);
-#endif
td->io_ops = NULL;
}
assert((io_u->flags & IO_U_F_FLIGHT) == 0);
io_u->flags |= IO_U_F_FLIGHT;
+ io_u->error = 0;
+ io_u->resid = 0;
+
if (td->io_ops->flags & FIO_SYNCIO) {
fio_gettime(&io_u->issue_time, NULL);
if (io_u->ddir != DDIR_SYNC)
td->io_issues[io_u->ddir]++;
+ io_u_mark_depth(td, io_u);
+
ret = td->io_ops->queue(td, io_u);
- if (ret == FIO_Q_QUEUED)
+ if (ret == FIO_Q_QUEUED || ret == FIO_Q_COMPLETED)
+ get_file(io_u->file);
+
+ if (ret == FIO_Q_QUEUED) {
+ int r;
+
td->io_u_queued++;
+ if (td->io_u_queued > td->iodepth_batch) {
+ r = td_io_commit(td);
+ if (r < 0)
+ return r;
+ }
+ }
if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
fio_gettime(&io_u->issue_time, NULL);
return 0;
}
+
+int td_io_open_file(struct thread_data *td, struct fio_file *f)
+{
+	if (td->io_ops->open_file(td, f)) /* engine-specific open; non-zero = failure */
+		return 1;
+
+	f->last_free_lookup = 0; /* reset per-file bookkeeping for a fresh open */
+	f->last_completed_pos = 0;
+	f->last_pos = 0;
+	f->flags |= FIO_FILE_OPEN; /* mark open and clear any deferred-close state */
+	f->flags &= ~FIO_FILE_CLOSING;
+
+	if (f->file_map) /* clear the block map, if one was allocated */
+		memset(f->file_map, 0, f->num_maps * sizeof(long));
+
+	td->nr_open_files++;
+	get_file(f); /* take a reference; presumably dropped via put_file() on close — confirm */
+	return 0;
+}
+
+void td_io_close_file(struct thread_data *td, struct fio_file *f)
+{
+	/*
+	 * Mark as closing only; the real close is deferred until the last
+	 */
+	f->flags |= FIO_FILE_CLOSING;
+
+	put_file(td, f); /* drop our reference — presumably the final put performs the close */
+}
+