/*
 * The io parts of the fio tool; this includes workers for sync and mmap'ed
 * io, as well as both POSIX and Linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio: if the get_io_u/put_io_u helpers and
 * structures were pulled in as well, it would be a perfectly generic io
 * engine that could be used for other projects.
 */
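/*
 * Illustrative sketch, not part of fio itself: assuming the public
 * ioengine_ops fields and the FIO_Q_COMPLETED return code from the engine
 * API, a minimal synchronous engine only needs ->name, ->version, FIO_SYNCIO
 * in ->flags and a ->queue handler that completes the io_u inline. The
 * engine and its names below are made up.
 *
 *	static int example_queue(struct thread_data *td, struct io_u *io_u)
 *	{
 *		io_u->error = 0;
 *		return FIO_Q_COMPLETED;
 *	}
 *
 *	static struct ioengine_ops example_ops = {
 *		.name		= "example",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO | FIO_DISKLESSIO,
 *		.queue		= example_queue,
 *	};
 */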
static FLIST_HEAD(engine_list);
static bool check_engine_ops(struct ioengine_ops *ops)
{
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version,
							FIO_IOOPS_VERSION);
		return true;
	}

	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return true;
	}

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)
		return false;

	if (!ops->event || !ops->getevents) {
		log_err("%s: no event/getevents handler\n", ops->name);
void unregister_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del(&ops->list);
	INIT_FLIST_HEAD(&ops->list);
}
void register_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	INIT_FLIST_HEAD(&ops->list);
	flist_add_tail(&ops->list, &engine_list);
}
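/*
 * Sketch, not taken verbatim from fio: built-in engines typically register
 * and unregister themselves from constructor/destructor-style hooks run at
 * program start-up and exit (fio provides the fio_init/fio_exit attribute
 * macros for this). example_ops refers to the hypothetical engine sketched
 * near the top of this file.
 *
 *	static void fio_init example_register(void)
 *	{
 *		register_ioengine(&example_ops);
 *	}
 *
 *	static void fio_exit example_unregister(void)
 *	{
 *		unregister_ioengine(&example_ops);
 *	}
 */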
static struct ioengine_ops *find_ioengine(const char *name)
{
	struct ioengine_ops *ops;
	struct flist_head *entry;

	flist_for_each(entry, &engine_list) {
		ops = flist_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
			return ops;
	}

	return NULL;
}
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		td_vmsg(td, -1, dlerror(), "dlopen");
		return NULL;
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");

	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure.
	 */
	if (!ops) {
		get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		if (get_ioengine)
			get_ioengine(&ops);
	}

	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		return NULL;
	}

	td->io_ops_dlhandle = dlhandle;
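/*
 * Illustrative sketch of the external-engine side of this contract (not a
 * file shipped with fio): a shared object passed in as engine_lib either
 * exports a non-static "ioengine" symbol, or, e.g. for C++ engines, exports
 * get_ioengine() and fills in the pointer. This assumes get_ioengine_t is
 * void (*)(struct ioengine_ops **); the engine itself is hypothetical.
 *
 *	struct ioengine_ops ioengine = {
 *		.name		= "external-example",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= example_queue,
 *	};
 *
 *	void get_ioengine(struct ioengine_ops **ops)
 *	{
 *		*ops = &ioengine;
 *	}
 */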
struct ioengine_ops *load_ioengine(struct thread_data *td, const char *name)
{
	struct ioengine_ops *ops;

	dprint(FD_IO, "load ioengine %s\n", name);

	engine[sizeof(engine) - 1] = '\0';
	strncpy(engine, name, sizeof(engine) - 1);

	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
		strcpy(engine, "libaio");

	ops = find_ioengine(engine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);
		return NULL;
	}

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(ops))
		return NULL;
/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

	if (td->eo && td->io_ops->options) {
		options_free(td->io_ops->options, td->eo);
		free(td->eo);
		td->eo = NULL;
	}

	if (td->io_ops_dlhandle)
		dlclose(td->io_ops_dlhandle);
void close_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		td->io_ops_data = NULL;
	}
int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	dprint_io_u(io_u, "prep");
	fio_ro_check(td, io_u);

	lock_file(td, io_u->file, io_u->ddir);

	if (td->io_ops->prep) {
		int ret = td->io_ops->prep(td, io_u);

		dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
		if (ret)
			unlock_file(td, io_u->file);
int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
		    const struct timespec *t)
{
	int r = 0;

	/*
	 * For the rdma ioengine's one-sided operations (RDMA_WRITE or
	 * RDMA_READ), the server side gets a message from the client side
	 * that the task is finished, and td->done is set to 1 after
	 * td_io_commit(). In that case there is no need to reap completion
	 * events on the server side.
	 */
	if (td->done)
		return 0;

	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);
	}
	if (max > td->cur_depth)
		max = td->cur_depth;

	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);

	if (r >= 0) {
		/*
		 * Reflect that our submitted requests were retrieved with
		 * whatever OS async calls are in the underlying engine.
		 */
		td->io_u_in_flight -= r;
		io_u_mark_complete(td, r);
	} else
		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
int td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	const enum fio_ddir ddir = acct_ddir(io_u);
	unsigned long buflen = io_u->xfer_buflen;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u_set(td, io_u, IO_U_F_FLIGHT);

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */
	if (td_ioengine_flagged(td, FIO_SYNCIO)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
				sizeof(struct timeval));
	}

	if (ddir_rw(ddir)) {
		td->io_issues[ddir]++;
		td->io_issue_bytes[ddir] += buflen;
		td->rate_io_issue_bytes[ddir] += buflen;
	}
	ret = td->io_ops->queue(td, io_u);

	unlock_file(td, io_u->file);

	if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
		td->io_issues[ddir]--;
		td->io_issue_bytes[ddir] -= buflen;
		td->rate_io_issue_bytes[ddir] -= buflen;
		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	}

	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");
	/*
	 * Add a warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the first
	 * IO, then it's likely an alignment problem or because the host fs
	 * does not support O_DIRECT.
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {
		log_info("fio: first direct IO errored. File system may not "
			 "support direct IO, or iomem_align= is bad. Try "
			 "setting direct=0.\n");
	}
	if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
	}

	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}
	} else if (ret == FIO_Q_QUEUED) {
		int r;

		if (ddir_rw(io_u->ddir))
			td->ts.total_io_u[io_u->ddir]++;
		if (td->io_u_queued >= td->o.iodepth_batch) {
			r = td_io_commit(td);
			if (r < 0)
				ret = r;
		}
	}

	if (!td_ioengine_flagged(td, FIO_SYNCIO)) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
				sizeof(struct timeval));
	}

	return ret;
}
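/*
 * Sketch of the engine side of the FIO_Q_* contract (hypothetical engine,
 * not part of fio): an async engine usually stashes the io_u in ->queue()
 * and returns FIO_Q_QUEUED, hands the batch to the OS in ->commit(), and
 * reports completions through ->getevents()/->event(). FIO_Q_BUSY tells
 * td_io_queue() to roll back the issue accounting above and commit what is
 * already queued. The example_data fields below are made up.
 *
 *	static int example_async_queue(struct thread_data *td,
 *				       struct io_u *io_u)
 *	{
 *		struct example_data *ed = td->io_ops_data;
 *
 *		if (ed->queued == ed->depth)
 *			return FIO_Q_BUSY;
 *
 *		ed->io_us[ed->queued++] = io_u;
 *		return FIO_Q_QUEUED;
 *	}
 */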
int td_io_init(struct thread_data *td)
{
	int ret = 0;

	if (td->io_ops->init) {
		ret = td->io_ops->init(td);
		if (ret && td->o.iodepth > 1) {
			log_err("fio: io engine init failed. Perhaps try"
				" reducing io depth?\n");
		}
	}

	if (!ret && td_ioengine_flagged(td, FIO_NOIO))
		td->flags |= TD_F_NOIO;

	return ret;
}
int td_io_commit(struct thread_data *td)
{
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return 0;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		if (ret)
			td_verror(td, -ret, "io commit");
	}

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	assert(!fio_file_open(f));

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
							td->o.nr_files);
		}

		assert(!fio_file_open(f));
		return 1;
	}

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);

	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");
			goto err;
		}
	}
	if (td_ioengine_flagged(td, FIO_DISKLESSIO))
		goto done;

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
		goto err;

	if (td->o.fadvise_hint != F_ADV_NONE &&
	    (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td->o.fadvise_hint == F_ADV_TYPE) {
			if (td_random(td))
				flags = POSIX_FADV_RANDOM;
			else
				flags = POSIX_FADV_SEQUENTIAL;
		} else if (td->o.fadvise_hint == F_ADV_RANDOM)
			flags = POSIX_FADV_RANDOM;
		else if (td->o.fadvise_hint == F_ADV_SEQUENTIAL)
			flags = POSIX_FADV_SEQUENTIAL;
		else {
			log_err("fio: unknown fadvise type %d\n",
							td->o.fadvise_hint);
			flags = POSIX_FADV_NORMAL;
		}

		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			td_verror(td, errno, "fadvise");
			goto err;
		}
	}
#ifdef FIO_HAVE_STREAMID
	if (td->o.fadvise_stream &&
	    (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
		off_t stream = td->o.fadvise_stream;

		if (posix_fadvise(f->fd, stream, f->io_size, POSIX_FADV_STREAMID) < 0) {
			td_verror(td, errno, "fadvise streamid");
			goto err;
		}
	}
#endif
#ifdef FIO_OS_DIRECTIO
	/*
	 * Some OSes have a distinct call to mark the file non-buffered,
	 * instead of using O_DIRECT (Solaris).
	 */
	if (td->o.odirect) {
		int ret = fio_set_odirect(f->fd);

		if (ret) {
			td_verror(td, ret, "fio_set_odirect");
			if (ret == ENOTTY) { /* ENOTTY suggests RAW device or ZFS */
				log_err("fio: doing directIO to RAW devices or ZFS not supported\n");
			} else
				log_err("fio: the file system does not seem to support direct IO\n");
			goto err;
		}
	}
#endif
done:
	log_file(td, f, FIO_LOG_OPEN_FILE);
	return 0;
err:
	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
	return 1;
}
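/*
 * Sketch for context, not fio code: on Solaris the non-buffered mode that
 * fio_set_odirect() arranges is requested with the directio(3C) advice call
 * rather than an O_DIRECT open flag, roughly as below (error handling and
 * the required header are elided).
 *
 *	if (directio(fd, DIRECTIO_ON) < 0)
 *		perror("directio");
 */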
int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * mark as closing, do real close when last io on it has completed
	 */
	fio_file_set_closing(f);

	disk_util_dec(f->du);

	if (td->o.file_lock_mode != FILE_LOCK_NONE)
		unlock_file_all(td, f);

	return put_file(td, f);
}
int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->unlink_file)
		return td->io_ops->unlink_file(td, f);
	else {
		int ret;

		ret = unlink(f->file_name);
int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
{
	if (!td->io_ops->get_file_size)
		return 0;

	return td->io_ops->get_file_size(td, f);
}
static int do_sync_file_range(const struct thread_data *td,
			      struct fio_file *f)
{
	off64_t offset, nbytes;

	offset = f->first_write;
	nbytes = f->last_write - f->first_write;

	if (!nbytes)
		return 0;

	return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
}
int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
{
	int ret;

	if (io_u->ddir == DDIR_SYNC) {
		ret = fsync(io_u->file->fd);
	} else if (io_u->ddir == DDIR_DATASYNC) {
#ifdef CONFIG_FDATASYNC
		ret = fdatasync(io_u->file->fd);
#else
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
#endif
	} else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
		ret = do_sync_file_range(td, io_u->file);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}
int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
{
#ifndef FIO_HAVE_TRIM
	io_u->error = EINVAL;
	return 0;
#else
	struct fio_file *f = io_u->file;
	int ret;

	ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
	if (!ret)
		return io_u->xfer_buflen;
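/*
 * Sketch for context, not fio code: on Linux block devices an os_trim()
 * style helper commonly issues the discard through the BLKDISCARD ioctl
 * from <linux/fs.h>, roughly as below; fd, offset and len are assumed to
 * come from the caller.
 *
 *	uint64_t range[2] = { offset, len };
 *
 *	if (ioctl(fd, BLKDISCARD, range) < 0)
 *		return errno;
 */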
int fio_show_ioengine_help(const char *engine)
{
	struct flist_head *entry;
	struct thread_data td;
	char *sep;
	int ret = 1;

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		flist_for_each(entry, &engine_list) {
			td.io_ops = flist_entry(entry, struct ioengine_ops,
						list);
			log_info("\t%s\n", td.io_ops->name);
		}
		return 0;
	}

	sep = strchr(engine, ',');

	memset(&td, 0, sizeof(td));

	td.io_ops = load_ioengine(&td, engine);
	if (!td.io_ops) {
		log_info("IO engine %s not found\n", engine);
		return 1;
	}

	if (td.io_ops->options)
		ret = show_cmd_help(td.io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", td.io_ops->name);