/*
 * The io parts of the fio tool, including workers for sync and mmap'ed
 * io, as well as both posix and linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio; if the get_io_u/put_io_u code and
 * structures were pulled into this as well, it would be a perfectly
 * generic io engine that could be used for other projects.
 */
static FLIST_HEAD(engine_list);

static bool check_engine_ops(struct ioengine_ops *ops)
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version,
			FIO_IOOPS_VERSION);

		log_err("%s: no queue handler\n", ops->name);

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)

	if (!ops->event || !ops->getevents) {
		log_err("%s: no event/getevents handler\n", ops->name);
void unregister_ioengine(struct ioengine_ops *ops)
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del_init(&ops->list);

void register_ioengine(struct ioengine_ops *ops)
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	flist_add_tail(&ops->list, &engine_list);
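
/*
 * Sketch of how an engine typically hooks into the list above: a
 * constructor (using fio's fio_init attribute macro) runs at load time
 * and calls register_ioengine(). This reuses the example ops table from
 * the earlier sketch.
 */
static void fio_init example_sync_register(void)
{
	register_ioengine(&example_sync_ops);
}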
static struct ioengine_ops *find_ioengine(const char *name)
	struct ioengine_ops *ops;
	struct flist_head *entry;

	flist_for_each(entry, &engine_list) {
		ops = flist_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
#ifdef CONFIG_DYNAMIC_ENGINES
static void *dlopen_external(struct thread_data *td, const char *engine)
	char engine_path[PATH_MAX];

	snprintf(engine_path, sizeof(engine_path), "%s/lib%s.so",
		 FIO_EXT_ENG_DIR, engine);

	return dlopen(engine_path, RTLD_LAZY);
#else
#define dlopen_external(td, engine) (NULL)
#endif
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle)
		dlhandle = dlopen_external(td, engine_lib);

		td_vmsg(td, -1, dlerror(), "dlopen");

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");

	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure and returns a pointer to it.
	 */
	get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		td_vmsg(td, -1, dlerror(), "dlsym");

	td->io_ops_dlhandle = dlhandle;
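
/*
 * Sketch of what an external engine is expected to export so that the
 * dlsym() lookups above succeed: either a non-static "ioengine" symbol,
 * or a get_ioengine() function that hands back a pointer (handy for C++
 * engines that build the ops table at runtime). The definitions below
 * are illustrative only and would live in the external shared object.
 */
struct ioengine_ops ioengine = {
	.name		= "example_external",
	.version	= FIO_IOOPS_VERSION,
	.flags		= FIO_SYNCIO,
	.queue		= example_sync_queue,
};

void get_ioengine(struct ioengine_ops **ops)
{
	*ops = &ioengine;
}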
static struct ioengine_ops *__load_ioengine(const char *engine)
	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8)) {
		dprint(FD_IO, "converting ioengine name: %s -> libaio\n",
			engine);
		engine = "libaio";

	dprint(FD_IO, "load ioengine %s\n", engine);
	return find_ioengine(engine);
struct ioengine_ops *load_ioengine(struct thread_data *td)
	struct ioengine_ops *ops = NULL;
	const char *name;

	/*
	 * Use ->ioengine_so_path if an external ioengine path is specified.
	 * In this case, ->ioengine is "external", which also means the
	 * "external:" prefix for external ioengines is in use.
	 */
	name = td->o.ioengine_so_path ?: td->o.ioengine;

	/*
	 * Try to load ->ioengine first, and if that fails try to dlopen(3)
	 * either ->ioengine or ->ioengine_so_path. This is redundant for an
	 * external ioengine with the prefix, and also leaves the possibility
	 * of unexpected behavior (e.g. if the "external" ioengine exists),
	 * but we do this so as not to break job files not using the prefix.
	 */
	ops = __load_ioengine(td->o.ioengine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	/*
	 * If ops is NULL, we failed to load ->ioengine, and also failed to
	 * dlopen(3) either ->ioengine or ->ioengine_so_path as a path.
	 */
	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(ops))
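
/*
 * Usage sketch for the lookup order above (engine names and paths are
 * examples only): a job can name a registered engine directly, e.g.
 *
 *	ioengine=libaio
 *
 * or point at an external shared object using the "external:" prefix, e.g.
 *
 *	ioengine=external:/usr/local/lib/libfoo-engine.so
 *
 * In the latter case ->ioengine is "external" and ->ioengine_so_path
 * holds the path that ends up being passed to dlopen_ioengine().
 */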
/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
	dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

	if (td->eo && td->io_ops->options) {
		options_free(td->io_ops->options, td->eo);

	if (td->io_ops_dlhandle) {
		dlclose(td->io_ops_dlhandle);
		td->io_ops_dlhandle = NULL;
void close_ioengine(struct thread_data *td)
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		td->io_ops_data = NULL;
int td_io_prep(struct thread_data *td, struct io_u *io_u)
	dprint_io_u(io_u, "prep");
	fio_ro_check(td, io_u);

	lock_file(td, io_u->file, io_u->ddir);

	if (td->io_ops->prep) {
		int ret = td->io_ops->prep(td, io_u);

		dprint(FD_IO, "prep: io_u %p: ret=%d\n", io_u, ret);

			unlock_file(td, io_u->file);
int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
		    const struct timespec *t)
	int r = 0;

	/*
	 * For the rdma ioengine's one-sided operations (RDMA_WRITE or
	 * RDMA_READ), the server side gets a message from the client side
	 * that the task is finished, and td->done is set to 1 after
	 * td_io_commit(). In this case, there is no need to reap completion
	 * events on the server side.
	 */

	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);

	if (max > td->cur_depth)

	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);

	/*
	 * Reflect that our submitted requests were retrieved with
	 * whatever OS async calls are in the underlying engine.
	 */
	td->io_u_in_flight -= r;
	io_u_mark_complete(td, r);

		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
enum fio_q_status td_io_queue(struct thread_data *td, struct io_u *io_u)
	const enum fio_ddir ddir = acct_ddir(io_u);
	unsigned long long buflen = io_u->xfer_buflen;
	enum fio_q_status ret;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u_set(td, io_u, IO_U_F_FLIGHT);

	/*
	 * If overlap checking was enabled in offload mode, we can release
	 * this lock that was acquired when we started the overlap check,
	 * because the IO_U_F_FLIGHT flag is now set.
	 */
	if (td_offload_overlap(td))
		pthread_mutex_unlock(&overlap_check);

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */

	if (td_ioengine_flagged(td, FIO_SYNCIO) ||
	    (td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) &&
	    io_u->ddir == DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
			       sizeof(io_u->issue_time));
	if (!(io_u->flags & IO_U_F_VER_LIST)) {
		td->io_issues[ddir]++;
		td->io_issue_bytes[ddir] += buflen;

		td->rate_io_issue_bytes[ddir] += buflen;

	ret = td->io_ops->queue(td, io_u);
	zbd_queue_io_u(io_u, ret);

	unlock_file(td, io_u->file);

	if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
		td->io_issues[ddir]--;
		td->io_issue_bytes[ddir] -= buflen;
		td->rate_io_issue_bytes[ddir] -= buflen;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");

	/*
	 * Add a warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the first
	 * IO, then it's likely an alignment problem, or because the host fs
	 * does not support O_DIRECT.
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {

		log_info("fio: first direct IO errored. File system may not "
			 "support direct IO, or iomem_align= is bad, or "
			 "invalid block size. Try setting direct=0.\n");

	if (zbd_unaligned_write(io_u->error) &&
	    td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.zone_mode != ZONE_MODE_ZBD) {
		log_info("fio: first I/O failed. If %s is a zoned block device, consider --zonemode=zbd\n",
			 io_u->file->file_name);
	if (!td->io_ops->commit) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);

	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;

	} else if (ret == FIO_Q_QUEUED) {
		td->io_u_queued++;

		if (ddir_rw(io_u->ddir) ||
		    (ddir_sync(io_u->ddir) && td->runstate != TD_FSYNCING))
			td->ts.total_io_u[io_u->ddir]++;

		if (td->io_u_queued >= td->o.iodepth_batch)

	if (!td_ioengine_flagged(td, FIO_SYNCIO) &&
	    (!td_ioengine_flagged(td, FIO_ASYNCIO_SYNC_TRIM) ||
	     io_u->ddir != DDIR_TRIM)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
			       sizeof(io_u->issue_time));
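
/*
 * Summary of the ->queue() return contract handled above (descriptive
 * note matching the FIO_Q_* cases in td_io_queue()):
 *
 *	FIO_Q_COMPLETED	the io completed synchronously; depth and
 *			completion stats are accounted for right away.
 *	FIO_Q_QUEUED	the io was accepted and will complete later via
 *			->commit()/->getevents().
 *	FIO_Q_BUSY	the engine could not accept the io; the issue
 *			accounting is rolled back and the caller must
 *			commit pending io and retry.
 */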
int td_io_init(struct thread_data *td)
	int ret = 0;

	if (td->io_ops->init) {
		ret = td->io_ops->init(td);
		if (ret) {
			log_err("fio: io engine %s init failed.%s\n",
				td->io_ops->name,
				td->o.iodepth > 1 ?
				" Perhaps try reducing io depth?" : "");
void td_io_commit(struct thread_data *td)
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		if (ret)
			td_verror(td, -ret, "io commit");

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
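
/*
 * Note: the in-flight accounting here is the counterpart of the
 * "td->io_u_in_flight -= r" done in td_io_getevents() once the async
 * completions have been reaped.
 */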
int td_io_open_file(struct thread_data *td, struct fio_file *f)
	if (fio_file_closing(f)) {
		/*
		 * Open translates to undo closing.
		 */
		fio_file_clear_closing(f);

	assert(!fio_file_open(f));

	assert(td->io_ops->open_file);

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
				td->o.open_files);

		assert(!fio_file_open(f));

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);
	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");

	if (td_ioengine_flagged(td, FIO_DISKLESSIO))

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))

	if (td->o.fadvise_hint != F_ADV_NONE &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td->o.fadvise_hint == F_ADV_TYPE) {
			if (td_random(td))
				flags = POSIX_FADV_RANDOM;
			else
				flags = POSIX_FADV_SEQUENTIAL;
		} else if (td->o.fadvise_hint == F_ADV_RANDOM)
			flags = POSIX_FADV_RANDOM;
		else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
			flags = POSIX_FADV_SEQUENTIAL;
		else {
			log_err("fio: unknown fadvise type %d\n",
				td->o.fadvise_hint);
			flags = POSIX_FADV_NORMAL;

		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			if (!fio_did_warn(FIO_WARN_FADVISE))
				log_err("fio: fadvise hint failed\n");
#ifdef FIO_HAVE_WRITE_HINT
	if (fio_option_is_set(&td->o, write_hint) &&
	    (f->filetype == FIO_TYPE_BLOCK || f->filetype == FIO_TYPE_FILE)) {
		uint64_t hint = td->o.write_hint;
		int cmd;

		/*
		 * For direct IO, we just need/want to set the hint on
		 * the file descriptor. For buffered IO, we need to set
		 * it on the inode.
		 */
		if (td->o.odirect)
			cmd = F_SET_FILE_RW_HINT;

		if (fcntl(f->fd, cmd, &hint) < 0) {
			td_verror(td, errno, "fcntl write hint");
	if (td->o.odirect && !OS_O_DIRECT && fio_set_directio(td, f))

	log_file(td, f, FIO_LOG_OPEN_FILE);

	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
int td_io_close_file(struct thread_data *td, struct fio_file *f)
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * mark as closing, do real close when last io on it has completed
	 */
	fio_file_set_closing(f);

	return put_file(td, f);
int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
	if (td->io_ops->unlink_file)
		return td->io_ops->unlink_file(td, f);

	ret = unlink(f->file_name);
int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
	if (!td->io_ops->get_file_size)
		return 0;

	return td->io_ops->get_file_size(td, f);
int fio_show_ioengine_help(const char *engine)
	struct flist_head *entry;
	struct thread_data td;
	struct ioengine_ops *io_ops;
	char *sep;
	int ret = 1;

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		flist_for_each(entry, &engine_list) {
			io_ops = flist_entry(entry, struct ioengine_ops, list);
			log_info("\t%s\n", io_ops->name);

	sep = strchr(engine, ',');

	memset(&td, 0, sizeof(struct thread_data));
	td.o.ioengine = (char *)engine;
	io_ops = load_ioengine(&td);

	if (!io_ops) {
		log_info("IO engine %s not found\n", engine);

	if (io_ops->options)
		ret = show_cmd_help(io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", io_ops->name);