#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
+#include <assert.h>
#include "fio.h"
#include "os.h"
INIT_LIST_HEAD(&ops->list);
}
-int register_ioengine(struct ioengine_ops *ops)
+void register_ioengine(struct ioengine_ops *ops)
{
INIT_LIST_HEAD(&ops->list);
list_add_tail(&ops->list, &engine_list);
- return 0;
}
static struct ioengine_ops *find_ioengine(const char *name)
{
struct ioengine_ops *ops;
struct list_head *entry;
- char engine[16];
-
- strncpy(engine, name, sizeof(engine) - 1);
-
- if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
- strcpy(engine, "libaio");
list_for_each(entry, &engine_list) {
ops = list_entry(entry, struct ioengine_ops, list);
- if (!strcmp(engine, ops->name))
+ if (!strcmp(name, ops->name))
return ops;
}
dlerror();
dlhandle = dlopen(engine_lib, RTLD_LAZY);
if (!dlhandle) {
- td_vmsg(td, -1, dlerror());
+ td_vmsg(td, -1, dlerror(), "dlopen");
return NULL;
}
*/
ops = dlsym(dlhandle, "ioengine");
if (!ops) {
- td_vmsg(td, -1, dlerror());
+ td_vmsg(td, -1, dlerror(), "dlsym");
dlclose(dlhandle);
return NULL;
}
if (td->io_ops->dlhandle)
dlclose(td->io_ops->dlhandle);
+#if 0
+ /* we can't do this for threads, so just leak it, it's exiting */
free(td->io_ops);
+#endif
td->io_ops = NULL;
}
int td_io_getevents(struct thread_data *td, int min, int max,
struct timespec *t)
{
+ if (min > 0 && td->io_ops->commit) {
+ int r = td->io_ops->commit(td);
+
+ if (r < 0)
+ return r;
+ }
if (td->io_ops->getevents)
return td->io_ops->getevents(td, min, max, t);
{
int ret;
- if (td->io_ops->flags & FIO_SYNCIO)
+ assert((io_u->flags & IO_U_F_FLIGHT) == 0);
+ io_u->flags |= IO_U_F_FLIGHT;
+
+ io_u->error = 0;
+ io_u->resid = 0;
+
+ if (td->io_ops->flags & FIO_SYNCIO) {
fio_gettime(&io_u->issue_time, NULL);
+ /*
+ * for a sync engine, set the timeout upfront
+ */
+ if (mtime_since(&td->timeout_end, &io_u->issue_time) < IO_U_TIMEOUT)
+ io_u_set_timeout(td);
+ }
+
if (io_u->ddir != DDIR_SYNC)
td->io_issues[io_u->ddir]++;
ret = td->io_ops->queue(td, io_u);
- if ((td->io_ops->flags & FIO_SYNCIO) == 0)
+ if (ret == FIO_Q_QUEUED) {
+ int r;
+
+ td->io_u_queued++;
+ if (td->io_u_queued > td->iodepth_batch) {
+ r = td_io_commit(td);
+ if (r < 0)
+ return r;
+ }
+ }
+
+ if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
fio_gettime(&io_u->issue_time, NULL);
+ /*
+ * async engine, set the timeout here
+ */
+ if (ret == FIO_Q_QUEUED &&
+ mtime_since(&td->timeout_end, &io_u->issue_time) < IO_U_TIMEOUT)
+ io_u_set_timeout(td);
+ }
+
return ret;
}
/*
 * Push any queued io_us down to the engine.  A no-op (returning 0)
 * when nothing is in flight or the engine provides no commit hook;
 * otherwise the engine's commit result is propagated.  Resets the
 * queued counter before handing off to the engine.
 */
int td_io_commit(struct thread_data *td)
{
	int ret = 0;

	if (td->cur_depth) {
		td->io_u_queued = 0;
		if (td->io_ops->commit)
			ret = td->io_ops->commit(td);
	}

	return ret;
}
+
+int td_io_open_file(struct thread_data *td, struct fio_file *f)
+{
+ if (!td->io_ops->open_file(td, f)) {
+ td->nr_open_files++;
+ return 0;
+ }
+
+ return 1;
+}
+
+void td_io_close_file(struct thread_data *td, struct fio_file *f)
+{
+ if (td->io_ops->close_file)
+ td->io_ops->close_file(td, f);
+ td->nr_open_files--;
+}