* generic io engine that could be used for other projects.
*
*/
-#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <assert.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <errno.h>
#include "fio.h"
#include "diskutil.h"
+#include "zbd.h"
static FLIST_HEAD(engine_list);
-static bool check_engine_ops(struct ioengine_ops *ops)
+/*
+ * Return true when this io_u must take the synchronous issue path even
+ * though the engine is async: the engine advertises FIO_ASYNCIO_SYNC_TRIM
+ * and the operation is a trim (DDIR_TRIM). Callers use this to decide
+ * where the issue timestamp is recorded for such io_us.
+ */
+static inline bool async_ioengine_sync_trim(struct thread_data *td,
+ struct io_u *io_u)
+{
+ return td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) &&
+ io_u->ddir == DDIR_TRIM;
+}
+
+static bool check_engine_ops(struct thread_data *td, struct ioengine_ops *ops)
{
if (ops->version != FIO_IOOPS_VERSION) {
log_err("bad ioops version %d (want %d)\n", ops->version,
if (ops->flags & FIO_SYNCIO)
return false;
+ /*
+ * async engines aren't reliable with offload
+ */
+ if ((td->o.io_submit_mode == IO_MODE_OFFLOAD) &&
+ (ops->flags & FIO_NO_OFFLOAD)) {
+ log_err("%s: can't be used with offloaded submit. Use a sync "
+ "engine\n", ops->name);
+ return true;
+ }
+
if (!ops->event || !ops->getevents) {
log_err("%s: no event/getevents handler\n", ops->name);
return true;
return NULL;
}
+#ifdef CONFIG_DYNAMIC_ENGINES
+/*
+ * Attempt to dlopen() an externally-packaged ioengine from
+ * FIO_EXT_ENG_DIR, using the "fio-<engine>.so" naming convention.
+ * Returns the dlopen handle, or NULL (after logging an informational
+ * message) when the shared object is not present.
+ *
+ * 'td' is accepted for signature symmetry with the non-dynamic stub
+ * below but is not used in this implementation.
+ *
+ * NOTE(review): the sprintf() assumes FIO_EXT_ENG_DIR plus the engine
+ * name always fits in PATH_MAX -- confirm callers cannot pass an
+ * oversized name, or switch to snprintf() with truncation handling.
+ */
+static void *dlopen_external(struct thread_data *td, const char *engine)
+{
+ char engine_path[PATH_MAX];
+ void *dlhandle;
+
+ sprintf(engine_path, "%s/fio-%s.so", FIO_EXT_ENG_DIR, engine);
+
+ dprint(FD_IO, "dlopen external %s\n", engine_path);
+ dlhandle = dlopen(engine_path, RTLD_LAZY);
+ if (!dlhandle)
+ log_info("Engine %s not found; Either name is invalid, was not built, or fio-engine-%s package is missing.\n",
+ engine, engine);
+
+ return dlhandle;
+}
+#else
+/* Without dynamic-engine support there is nothing to open. */
+#define dlopen_external(td, engine) (NULL)
+#endif
+
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
const char *engine_lib)
{
struct ioengine_ops *ops;
void *dlhandle;
- dprint(FD_IO, "dload engine %s\n", engine_lib);
+ if (!strncmp(engine_lib, "linuxaio", 8) ||
+ !strncmp(engine_lib, "aio", 3))
+ engine_lib = "libaio";
+
+ dprint(FD_IO, "dlopen engine %s\n", engine_lib);
dlerror();
dlhandle = dlopen(engine_lib, RTLD_LAZY);
if (!dlhandle) {
- td_vmsg(td, -1, dlerror(), "dlopen");
- return NULL;
+ dlhandle = dlopen_external(td, engine_lib);
+ if (!dlhandle) {
+ td_vmsg(td, -1, dlerror(), "dlopen");
+ return NULL;
+ }
}
/*
return NULL;
}
- td->io_ops_dlhandle = dlhandle;
+ ops->dlhandle = dlhandle;
return ops;
}
-static struct ioengine_ops *__load_ioengine(const char *name)
+static struct ioengine_ops *__load_ioengine(const char *engine)
{
- char engine[64];
-
- engine[sizeof(engine) - 1] = '\0';
- strncpy(engine, name, sizeof(engine) - 1);
-
/*
* linux libaio has alias names, so convert to what we want
*/
if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3)) {
- dprint(FD_IO, "converting ioengine name: %s -> libaio\n", name);
- strcpy(engine, "libaio");
+ dprint(FD_IO, "converting ioengine name: %s -> libaio\n",
+ engine);
+ engine = "libaio";
}
dprint(FD_IO, "load ioengine %s\n", engine);
* so as not to break job files not using the prefix.
*/
ops = __load_ioengine(td->o.ioengine);
- if (!ops)
+
+ /* We do re-dlopen existing handles, for reference counting */
+ if (!ops || ops->dlhandle)
ops = dlopen_ioengine(td, name);
/*
/*
* Check that the required methods are there.
*/
- if (check_engine_ops(ops))
+ if (check_engine_ops(td, ops))
return NULL;
return ops;
*/
void free_ioengine(struct thread_data *td)
{
+ assert(td != NULL && td->io_ops != NULL);
+
dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);
if (td->eo && td->io_ops->options) {
td->eo = NULL;
}
- if (td->io_ops_dlhandle) {
- dlclose(td->io_ops_dlhandle);
- td->io_ops_dlhandle = NULL;
+ if (td->io_ops->dlhandle) {
+ dprint(FD_IO, "dlclose ioengine %s\n", td->io_ops->name);
+ dlclose(td->io_ops->dlhandle);
}
td->io_ops = NULL;
return r;
}
-int td_io_queue(struct thread_data *td, struct io_u *io_u)
+enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
{
const enum fio_ddir ddir = acct_ddir(io_u);
- unsigned long buflen = io_u->xfer_buflen;
- int ret;
+ unsigned long long buflen = io_u->xfer_buflen;
+ enum fio_q_status ret;
dprint_io_u(io_u, "queue");
fio_ro_check(td, io_u);
assert((io_u->flags & IO_U_F_FLIGHT) == 0);
io_u_set(td, io_u, IO_U_F_FLIGHT);
+ /*
+ * If overlap checking was enabled in offload mode we
+ * can release this lock that was acquired when we
+ * started the overlap check because the IO_U_F_FLIGHT
+ * flag is now set
+ */
+ if (td_offload_overlap(td)) {
+ int res;
+
+ res = pthread_mutex_unlock(&overlap_check);
+ if (fio_unlikely(res != 0)) {
+ log_err("failed to unlock overlap check mutex, err: %i:%s", errno, strerror(errno));
+ abort();
+ }
+ }
+
assert(fio_file_open(io_u->file));
/*
io_u->error = 0;
io_u->resid = 0;
- if (td_ioengine_flagged(td, FIO_SYNCIO)) {
- if (fio_fill_issue_time(td))
+ if (td_ioengine_flagged(td, FIO_SYNCIO) ||
+ async_ioengine_sync_trim(td, io_u)) {
+ if (fio_fill_issue_time(td)) {
fio_gettime(&io_u->issue_time, NULL);
- /*
- * only used for iolog
- */
- if (td->o.read_iolog_file)
- memcpy(&td->last_issue, &io_u->issue_time,
- sizeof(io_u->issue_time));
+ /*
+ * only used for iolog
+ */
+ if (td->o.read_iolog_file)
+ memcpy(&td->last_issue, &io_u->issue_time,
+ sizeof(io_u->issue_time));
+ }
}
+
if (ddir_rw(ddir)) {
if (!(io_u->flags & IO_U_F_VER_LIST)) {
td->io_issues[ddir]++;
}
ret = td->io_ops->queue(td, io_u);
+ zbd_queue_io_u(td, io_u, ret);
unlock_file(td, io_u->file);
"invalid block size. Try setting direct=0.\n");
}
- if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
+ if (zbd_unaligned_write(io_u->error) &&
+ td->io_issues[io_u->ddir & 1] == 1 &&
+ td->o.zone_mode != ZONE_MODE_ZBD) {
+ log_info("fio: first I/O failed. If %s is a zoned block device, consider --zonemode=zbd\n",
+ io_u->file->file_name);
+ }
+
+ if (!td->io_ops->commit) {
io_u_mark_submit(td, 1);
io_u_mark_complete(td, 1);
}
if (ret == FIO_Q_COMPLETED) {
- if (ddir_rw(io_u->ddir)) {
+ if (ddir_rw(io_u->ddir) ||
+ (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
io_u_mark_depth(td, 1);
td->ts.total_io_u[io_u->ddir]++;
}
} else if (ret == FIO_Q_QUEUED) {
- int r;
-
td->io_u_queued++;
- if (ddir_rw(io_u->ddir))
+ if (ddir_rw(io_u->ddir) ||
+ (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
td->ts.total_io_u[io_u->ddir]++;
- if (td->io_u_queued >= td->o.iodepth_batch) {
- r = td_io_commit(td);
- if (r < 0)
- return r;
- }
+ if (td->io_u_queued >= td->o.iodepth_batch)
+ td_io_commit(td);
}
- if (!td_ioengine_flagged(td, FIO_SYNCIO)) {
- if (fio_fill_issue_time(td))
+ if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
+ !async_ioengine_sync_trim(td, io_u)) {
+ if (fio_fill_issue_time(td) &&
+ !td_ioengine_flagged(td, FIO_ASYNCIO_SETS_ISSUE_TIME)) {
fio_gettime(&io_u->issue_time, NULL);
- /*
- * only used for iolog
- */
- if (td->o.read_iolog_file)
- memcpy(&td->last_issue, &io_u->issue_time,
- sizeof(io_u->issue_time));
+ /*
+ * only used for iolog
+ */
+ if (td->o.read_iolog_file)
+ memcpy(&td->last_issue, &io_u->issue_time,
+ sizeof(io_u->issue_time));
+ }
}
return ret;
return ret;
}
-int td_io_commit(struct thread_data *td)
+void td_io_commit(struct thread_data *td)
{
int ret;
dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);
if (!td->cur_depth || !td->io_u_queued)
- return 0;
+ return;
io_u_mark_depth(td, td->io_u_queued);
*/
td->io_u_in_flight += td->io_u_queued;
td->io_u_queued = 0;
-
- return 0;
}
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
+ if (fio_file_closing(f)) {
+ /*
+ * Open translates to undo closing.
+ */
+ fio_file_clear_closing(f);
+ get_file(f);
+ return 0;
+ }
assert(!fio_file_open(f));
assert(f->fd == -1);
assert(td->io_ops->open_file);
flags = POSIX_FADV_RANDOM;
else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
flags = POSIX_FADV_SEQUENTIAL;
+#ifdef POSIX_FADV_NOREUSE
+ else if (td->o.fadvise_hint == F_ADV_NOREUSE)
+ flags = POSIX_FADV_NOREUSE;
+#endif
else {
log_err("fio: unknown fadvise type %d\n",
td->o.fadvise_hint);
}
if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
- td_verror(td, errno, "fadvise");
- goto err;
+ if (!fio_did_warn(FIO_WARN_FADVISE))
+ log_err("fio: fadvise hint failed\n");
}
}
#ifdef FIO_HAVE_WRITE_HINT
*/
fio_file_set_closing(f);
- disk_util_dec(f->du);
-
- if (td->o.file_lock_mode != FILE_LOCK_NONE)
- unlock_file_all(td, f);
-
return put_file(td, f);
}
return td->io_ops->get_file_size(td, f);
}
+#ifdef CONFIG_DYNAMIC_ENGINES
+/* Load all dynamic engines in FIO_EXT_ENG_DIR for enghelp command */
+/*
+ * Scans FIO_EXT_ENG_DIR and dlopen()s every entry (other than "." and
+ * "..") via dlopen_ioengine(). Silently does nothing when the directory
+ * cannot be opened. Return values from dlopen_ioengine() are
+ * deliberately ignored: this is a best-effort preload so the engines
+ * appear in the help listing.
+ *
+ * NOTE(review): the sprintf() assumes FIO_EXT_ENG_DIR plus d_name fits
+ * in PATH_MAX -- confirm, or use snprintf(). Also presumably the dlopen
+ * handles are released elsewhere (or leak for the short-lived help
+ * invocation); verify against the caller.
+ */
+static void
+fio_load_dynamic_engines(struct thread_data *td)
+{
+ DIR *dirhandle = NULL;
+ struct dirent *dirent = NULL;
+ char engine_path[PATH_MAX];
+
+ dirhandle = opendir(FIO_EXT_ENG_DIR);
+ if (!dirhandle)
+ return;
+
+ while ((dirent = readdir(dirhandle)) != NULL) {
+ if (!strcmp(dirent->d_name, ".") ||
+ !strcmp(dirent->d_name, ".."))
+ continue;
+
+ sprintf(engine_path, "%s/%s", FIO_EXT_ENG_DIR, dirent->d_name);
+ dlopen_ioengine(td, engine_path);
+ }
+
+ closedir(dirhandle);
+}
+#else
+/* No-op when dynamic engine support is compiled out. */
+#define fio_load_dynamic_engines(td) do { } while (0)
+#endif
+
int fio_show_ioengine_help(const char *engine)
{
struct flist_head *entry;
+ struct thread_data td;
struct ioengine_ops *io_ops;
char *sep;
int ret = 1;
+ memset(&td, 0, sizeof(struct thread_data));
+
if (!engine || !*engine) {
log_info("Available IO engines:\n");
+ fio_load_dynamic_engines(&td);
flist_for_each(entry, &engine_list) {
io_ops = flist_entry(entry, struct ioengine_ops, list);
log_info("\t%s\n", io_ops->name);
sep++;
}
- io_ops = __load_ioengine(engine);
- if (!io_ops) {
+ td.o.ioengine = (char *)engine;
+ td.io_ops = load_ioengine(&td);
+
+ if (!td.io_ops) {
log_info("IO engine %s not found\n", engine);
return 1;
}
- if (io_ops->options)
- ret = show_cmd_help(io_ops->options, sep);
+ if (td.io_ops->options)
+ ret = show_cmd_help(td.io_ops->options, sep);
else
- log_info("IO engine %s has no options\n", io_ops->name);
+ log_info("IO engine %s has no options\n", td.io_ops->name);
+ free_ioengine(&td);
return ret;
}