/*
 * The io parts of the fio tool, includes workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio: if the get_io_u/put_io_u helpers
 * and structures were pulled into this as well, it would be a perfectly
 * generic io engine that could be used for other projects.
 *
 */
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/types.h>
#include <dirent.h>

#include "fio.h"
#include "diskutil.h"
#include "zbd.h"

static FLIST_HEAD(engine_list);

static bool check_engine_ops(struct thread_data *td, struct ioengine_ops *ops)
{
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version,
							FIO_IOOPS_VERSION);
		return true;
	}

	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return true;
	}

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)
		return false;

	/*
	 * async engines aren't reliable with offloaded submit
	 */
	if ((td->o.io_submit_mode == IO_MODE_OFFLOAD) &&
	    (ops->flags & FIO_NO_OFFLOAD)) {
		log_err("%s: can't be used with offloaded submit. Use a sync "
			"engine\n", ops->name);
		return true;
	}

	if (!ops->event || !ops->getevents) {
		log_err("%s: no event/getevents handler\n", ops->name);
		return true;
	}

	return false;
}

void unregister_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del_init(&ops->list);
}

void register_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	flist_add_tail(&ops->list, &engine_list);
}

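/*
 * Example (illustrative sketch, not part of fio): a minimal sync engine
 * needs little more than ->name, ->version and ->queue; file-backed use
 * also wants ->open_file (e.g. the generic_open_file() helper). The
 * names "example_null" and example_queue() below are hypothetical:
 *
 *	static enum fio_q_status example_queue(struct thread_data *td,
 *					       struct io_u *io_u)
 *	{
 *		return FIO_Q_COMPLETED;	(completes everything inline)
 *	}
 *
 *	static struct ioengine_ops example_ops = {
 *		.name		= "example_null",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO | FIO_DISKLESSIO,
 *		.queue		= example_queue,
 *	};
 *
 *	register_ioengine(&example_ops);
 */
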
static struct ioengine_ops *find_ioengine(const char *name)
{
	struct ioengine_ops *ops;
	struct flist_head *entry;

	flist_for_each(entry, &engine_list) {
		ops = flist_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
			return ops;
	}

	return NULL;
}

#ifdef CONFIG_DYNAMIC_ENGINES
static void *dlopen_external(struct thread_data *td, const char *engine)
{
	char engine_path[PATH_MAX];
	void *dlhandle;

	sprintf(engine_path, "%s/fio-%s.so", FIO_EXT_ENG_DIR, engine);

	dlhandle = dlopen(engine_path, RTLD_LAZY);
	if (!dlhandle)
		log_info("Engine %s not found; either the name is invalid, the engine was not built, or the fio-engine-%s package is missing.\n",
			 engine, engine);

	return dlhandle;
}
#else
#define dlopen_external(td, engine) (NULL)
#endif

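/*
 * Note (summary; the exact directory is set at build time): with
 * CONFIG_DYNAMIC_ENGINES, an engine <name> that is not built in is
 * resolved as FIO_EXT_ENG_DIR/fio-<name>.so, so e.g. ioengine=xnvme
 * would typically try /usr/local/lib/fio/fio-xnvme.so.
 */
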
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	dlerror();
	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		dlhandle = dlopen_external(td, engine_lib);
		if (!dlhandle) {
			td_vmsg(td, -1, dlerror(), "dlopen");
			return NULL;
		}
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");

	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure.
	 */
	if (!ops) {
		get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		if (get_ioengine)
			get_ioengine(&ops);
	}

	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		dlclose(dlhandle);
		return NULL;
	}

	td->io_ops_dlhandle = dlhandle;
	return ops;
}

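/*
 * Export contract an external engine is expected to meet (sketch; the
 * engine name "ext_example" is hypothetical): either expose a
 * non-static ops structure, named "ioengine" or after the library,
 *
 *	struct ioengine_ops ioengine = {
 *		.name		= "ext_example",
 *		.version	= FIO_IOOPS_VERSION,
 *		...
 *	};
 *
 * or, e.g. from C++, provide an allocator hook instead:
 *
 *	extern "C" void get_ioengine(struct ioengine_ops **ops)
 *	{
 *		*ops = &ioengine;
 *	}
 */
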
static struct ioengine_ops *__load_ioengine(const char *engine)
{
	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8)) {
		dprint(FD_IO, "converting ioengine name: %s -> libaio\n",
		       engine);
		engine = "libaio";
	}

	dprint(FD_IO, "load ioengine %s\n", engine);
	return find_ioengine(engine);
}

struct ioengine_ops *load_ioengine(struct thread_data *td)
{
	struct ioengine_ops *ops = NULL;
	const char *name;

	/*
	 * Use ->ioengine_so_path if an external ioengine path is specified.
	 * In this case, ->ioengine is "external" which also means the prefix
	 * for external ioengines "external:" is properly used.
	 */
	name = td->o.ioengine_so_path ?: td->o.ioengine;

	/*
	 * Try to load ->ioengine first, and if that fails try to dlopen(3)
	 * either ->ioengine or ->ioengine_so_path. This is redundant for an
	 * external ioengine with the prefix, and also leaves the possibility
	 * of unexpected behavior (e.g. if an "external" ioengine exists),
	 * but we do this so as not to break job files not using the prefix.
	 */
	ops = __load_ioengine(td->o.ioengine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	/*
	 * If ops is NULL, we failed to load ->ioengine, and also failed to
	 * dlopen(3) either ->ioengine or ->ioengine_so_path as a path.
	 */
	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);
		return NULL;
	}

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(td, ops))
		return NULL;

	return ops;
}

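/*
 * Job file usage (sketch): built-in engines are referenced by name,
 * external ones by path with the "external:" prefix, e.g.:
 *
 *	ioengine=libaio
 *	ioengine=external:/path/to/fio-engine.so
 *
 * The .so path above is a placeholder.
 */
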
/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

	if (td->eo && td->io_ops->options) {
		options_free(td->io_ops->options, td->eo);
		free(td->eo);
		td->eo = NULL;
	}

	if (td->io_ops_dlhandle) {
		dlclose(td->io_ops_dlhandle);
		td->io_ops_dlhandle = NULL;
	}

	td->io_ops = NULL;
}

void close_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		td->io_ops_data = NULL;
	}

	free_ioengine(td);
}

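/*
 * Overview: the td_io_*() helpers below drive the engine hooks in the
 * usual order: ->init() once per thread, ->prep() and ->queue() per
 * io_u, ->commit()/->getevents()/->event() for async engines, and
 * finally ->cleanup() via close_ioengine().
 */
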
int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	dprint_io_u(io_u, "prep");
	fio_ro_check(td, io_u);

	lock_file(td, io_u->file, io_u->ddir);

	if (td->io_ops->prep) {
		int ret = td->io_ops->prep(td, io_u);

		dprint(FD_IO, "prep: io_u %p: ret=%d\n", io_u, ret);

		if (ret)
			unlock_file(td, io_u->file);
		return ret;
	}

	return 0;
}

int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
		    const struct timespec *t)
{
	int r = 0;

	/*
	 * For ioengine=rdma with one-sided RDMA_WRITE or RDMA_READ
	 * operations, the server side gets a message from the client side
	 * that the task is finished, and td->done is set to 1 after
	 * td_io_commit(). In that case there is no need to reap completion
	 * events on the server side.
	 */
	if (td->done)
		return 0;

	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);
		if (r < 0)
			goto out;
	}
	if (max > td->cur_depth)
		max = td->cur_depth;
	if (min > max)
		max = min;

	r = 0;
	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);
out:
	if (r >= 0) {
		/*
		 * Reflect that our submitted requests were retrieved with
		 * whatever OS async calls are in the underlying engine.
		 */
		td->io_u_in_flight -= r;
		io_u_mark_complete(td, r);
	} else
		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
	return r;
}

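/*
 * Caller pattern (sketch): min == 0 polls for whatever has already
 * completed, while min > 0 commits any queued IO first and then waits
 * for at least min completions, e.g.:
 *
 *	ret = td_io_getevents(td, 1, td->cur_depth, NULL);
 *
 * with each reaped event then fetched via the engine's ->event() hook.
 */
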
enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	const enum fio_ddir ddir = acct_ddir(io_u);
	unsigned long long buflen = io_u->xfer_buflen;
	enum fio_q_status ret;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u_set(td, io_u, IO_U_F_FLIGHT);

	/*
	 * If overlap checking was enabled in offload mode, we can release
	 * the lock that was acquired when the overlap check started, now
	 * that the IO_U_F_FLIGHT flag is set.
	 */
	if (td_offload_overlap(td)) {
		int res = pthread_mutex_unlock(&overlap_check);
		assert(res == 0);
	}

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */
	log_io_u(td, io_u);

	io_u->error = 0;
	io_u->resid = 0;

	if (td_ioengine_flagged(td, FIO_SYNCIO) ||
	    (td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) &&
	     io_u->ddir == DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(io_u->issue_time));
	}

	if (ddir_rw(ddir)) {
		if (!(io_u->flags & IO_U_F_VER_LIST)) {
			td->io_issues[ddir]++;
			td->io_issue_bytes[ddir] += buflen;
		}
		td->rate_io_issue_bytes[ddir] += buflen;
	}

	ret = td->io_ops->queue(td, io_u);
	zbd_queue_io_u(td, io_u, ret);

	unlock_file(td, io_u->file);

	if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
		td->io_issues[ddir]--;
		td->io_issue_bytes[ddir] -= buflen;
		td->rate_io_issue_bytes[ddir] -= buflen;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	}

	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");

	/*
	 * Add a warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the
	 * first IO, it's likely an alignment problem, or because the host
	 * fs does not support O_DIRECT.
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {

		log_info("fio: first direct IO errored. File system may not "
			 "support direct IO, or iomem_align= is bad, or "
			 "invalid block size. Try setting direct=0.\n");
	}

	if (zbd_unaligned_write(io_u->error) &&
	    td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.zone_mode != ZONE_MODE_ZBD) {
		log_info("fio: first I/O failed. If %s is a zoned block device, consider --zonemode=zbd\n",
			 io_u->file->file_name);
	}

	if (!td->io_ops->commit) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		zbd_put_io_u(td, io_u);
	}

	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}
	} else if (ret == FIO_Q_QUEUED) {
		td->io_u_queued++;

		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
			td->ts.total_io_u[io_u->ddir]++;

		if (td->io_u_queued >= td->o.iodepth_batch)
			td_io_commit(td);
	}

	if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
	    (!td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) ||
	     io_u->ddir != DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(io_u->issue_time));
	}

	return ret;
}

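/*
 * Caller's side of the return value (sketch): FIO_Q_COMPLETED means the
 * io_u finished inline and can be reaped now, FIO_Q_QUEUED means it
 * will surface via td_io_getevents(), and FIO_Q_BUSY means the engine
 * is full and the io_u must be retried after a commit, e.g.:
 *
 *	switch (td_io_queue(td, io_u)) {
 *	case FIO_Q_COMPLETED:
 *		break;		(check io_u->error, reap inline)
 *	case FIO_Q_QUEUED:
 *		break;		(reaped later via td_io_getevents())
 *	case FIO_Q_BUSY:
 *		td_io_commit(td);
 *		break;		(then requeue this io_u)
 *	}
 */
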
int td_io_init(struct thread_data *td)
{
	int ret = 0;

	if (td->io_ops->init) {
		ret = td->io_ops->init(td);
		if (ret)
			log_err("fio: io engine %s init failed.%s\n",
				td->io_ops->name,
				td->o.iodepth > 1 ?
					" Perhaps try reducing io depth?" : "");
		else
			td->io_ops_init = 1;
		if (ret)
			td->error = ret;
	}

	return ret;
}

void td_io_commit(struct thread_data *td)
{
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		if (ret)
			td_verror(td, -ret, "io commit");
	}

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
	td->io_u_queued = 0;
}

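/*
 * Accounting note: td_io_commit() moves the io_u_queued count into
 * io_u_in_flight, and td_io_getevents() subtracts the reaped count
 * again, so io_u_in_flight always reflects IOs the engine currently
 * owns.
 */
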
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	if (fio_file_closing(f)) {
		/*
		 * Open translates to undoing the pending close.
		 */
		fio_file_clear_closing(f);
		get_file(f);
		return 0;
	}

	assert(!fio_file_open(f));
	assert(td->io_ops->open_file);

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
							td->o.nr_files);
		}

		assert(!fio_file_open(f));
		return 1;
	}

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);

	td->nr_open_files++;
	get_file(f);

	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");
			goto err;
		}
	}

	if (td_ioengine_flagged(td, FIO_DISKLESSIO))
		goto done;

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
		goto err;

	if (td->o.fadvise_hint != F_ADV_NONE &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td->o.fadvise_hint == F_ADV_TYPE) {
			if (td_random(td))
				flags = POSIX_FADV_RANDOM;
			else
				flags = POSIX_FADV_SEQUENTIAL;
		} else if (td->o.fadvise_hint == F_ADV_RANDOM)
			flags = POSIX_FADV_RANDOM;
		else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
			flags = POSIX_FADV_SEQUENTIAL;
		else {
			log_err("fio: unknown fadvise type %d\n",
				td->o.fadvise_hint);
			flags = POSIX_FADV_NORMAL;
		}

		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			if (!fio_did_warn(FIO_WARN_FADVISE))
				log_err("fio: fadvise hint failed\n");
		}
	}
#ifdef FIO_HAVE_WRITE_HINT
	if (fio_option_is_set(&td->o, write_hint) &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		uint64_t hint = td->o.write_hint;
		int cmd;

		/*
		 * For direct IO, we just need/want to set the hint on
		 * the file descriptor. For buffered IO, we need to set
		 * it on the inode.
		 */
		if (td->o.odirect)
			cmd = F_SET_FILE_RW_HINT;
		else
			cmd = F_SET_RW_HINT;

		if (fcntl(f->fd, cmd, &hint) < 0) {
			td_verror(td, errno, "fcntl write hint");
			goto err;
		}
	}
#endif

	if (td->o.odirect && !OS_O_DIRECT && fio_set_directio(td, f))
		goto err;

done:
	log_file(td, f, FIO_LOG_OPEN_FILE);
	return 0;
err:
	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
	return 1;
}

int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * Mark as closing; the real close happens once the last io on the
	 * file has completed.
	 */
	fio_file_set_closing(f);

	return put_file(td, f);
}

int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->unlink_file)
		return td->io_ops->unlink_file(td, f);
	else {
		int ret;

		ret = unlink(f->file_name);
		if (ret < 0)
			return errno;

		return 0;
	}
}

int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
{
	if (!td->io_ops->get_file_size)
		return 0;

	return td->io_ops->get_file_size(td, f);
}

#ifdef CONFIG_DYNAMIC_ENGINES
/* Load all dynamic engines in FIO_EXT_ENG_DIR for the enghelp command */
static void
fio_load_dynamic_engines(struct thread_data *td)
{
	DIR *dirhandle = NULL;
	struct dirent *dirent = NULL;
	char engine_path[PATH_MAX];

	dirhandle = opendir(FIO_EXT_ENG_DIR);
	if (!dirhandle)
		return;

	while ((dirent = readdir(dirhandle)) != NULL) {
		if (!strcmp(dirent->d_name, ".") ||
		    !strcmp(dirent->d_name, ".."))
			continue;

		sprintf(engine_path, "%s/%s", FIO_EXT_ENG_DIR, dirent->d_name);
		dlopen_ioengine(td, engine_path);
	}

	closedir(dirhandle);
}
#else
#define fio_load_dynamic_engines(td) do { } while (0)
#endif

int fio_show_ioengine_help(const char *engine)
{
	struct flist_head *entry;
	struct thread_data td;
	struct ioengine_ops *io_ops;
	char *sep;
	int ret = 1;

	memset(&td, 0, sizeof(struct thread_data));

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		fio_load_dynamic_engines(&td);
		flist_for_each(entry, &engine_list) {
			io_ops = flist_entry(entry, struct ioengine_ops, list);
			log_info("\t%s\n", io_ops->name);
		}
		return 0;
	}

	sep = strchr(engine, ',');
	if (sep) {
		*sep = 0;
		sep++;
	}

	td.o.ioengine = (char *)engine;
	io_ops = load_ioengine(&td);
	if (!io_ops) {
		log_info("IO engine %s not found\n", engine);
		return 1;
	}

	if (io_ops->options)
		ret = show_cmd_help(io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", io_ops->name);

	free_ioengine(&td);
	return ret;
}