2 * The io parts of the fio tool, including workers for sync and mmap'ed
3 * io, as well as both posix and linux libaio support.
5 * sync io is implemented on top of aio.
7 * This is not really specific to fio; if the get_io_u/put_io_u helpers
8 * and structures were pulled into this as well, it would be a perfectly
9 * generic io engine that could be used for other projects.
23 static FLIST_HEAD(engine_list);
25 static bool check_engine_ops(struct ioengine_ops *ops)
27 if (ops->version != FIO_IOOPS_VERSION) {
28 log_err("bad ioops version %d (want %d)\n", ops->version,
34 log_err("%s: no queue handler\n", ops->name);
39 * sync engines only need a ->queue()
41 if (ops->flags & FIO_SYNCIO)
44 if (!ops->event || !ops->getevents) {
45 log_err("%s: no event/getevents handler\n", ops->name);
52 void unregister_ioengine(struct ioengine_ops *ops)
54 dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
55 flist_del(&ops->list);
56 INIT_FLIST_HEAD(&ops->list);
59 void register_ioengine(struct ioengine_ops *ops)
61 dprint(FD_IO, "ioengine %s registered\n", ops->name);
62 INIT_FLIST_HEAD(&ops->list);
63 flist_add_tail(&ops->list, &engine_list);
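/*
 * For reference, a minimal sketch of how a built-in engine typically ends
 * up in engine_list: it calls register_ioengine() from a constructor that
 * runs before main(). The engine name and queue handler below are purely
 * illustrative, and fio_init is assumed to expand to a constructor
 * attribute.
 *
 *	static int fio_example_queue(struct thread_data *td, struct io_u *io_u)
 *	{
 *		return FIO_Q_COMPLETED;		// pretend the io completed inline
 *	}
 *
 *	static struct ioengine_ops example_ioengine = {
 *		.name		= "example",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,	// sync: only ->queue() is required
 *		.queue		= fio_example_queue,
 *	};
 *
 *	static void fio_init fio_example_register(void)
 *	{
 *		register_ioengine(&example_ioengine);
 *	}
 */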
66 static struct ioengine_ops *find_ioengine(const char *name)
68 struct ioengine_ops *ops;
69 struct flist_head *entry;
71 flist_for_each(entry, &engine_list) {
72 ops = flist_entry(entry, struct ioengine_ops, list);
73 if (!strcmp(name, ops->name))
80 static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
81 const char *engine_lib)
83 struct ioengine_ops *ops;
86 dprint(FD_IO, "dload engine %s\n", engine_lib);
89 dlhandle = dlopen(engine_lib, RTLD_LAZY);
91 td_vmsg(td, -1, dlerror(), "dlopen");
96 * Unlike the included modules, external engines should have a
97 * non-static ioengine structure that we can reference.
99 ops = dlsym(dlhandle, engine_lib);
101 ops = dlsym(dlhandle, "ioengine");
104 * For some external engines (like C++ ones) it is not that trivial
105 * to provide a non-static ioengine structure that we can reference.
106 * Instead we call a method which allocates the required ioengine
110 get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");
117 td_vmsg(td, -1, dlerror(), "dlsym");
122 ops->dlhandle = dlhandle;
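/*
 * For reference, a sketch of what a matching external engine can export.
 * Either of the two lookups above will find it: a non-static "ioengine"
 * symbol, or a get_ioengine() hook for cases (e.g. C++) where exporting
 * the structure directly is awkward. Everything except the symbol names
 * "ioengine" and "get_ioengine" is illustrative.
 *
 *	struct ioengine_ops ioengine = {
 *		.name		= "external-example",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= external_example_queue,
 *	};
 *
 *	void get_ioengine(struct ioengine_ops **ioengine_ptr)
 *	{
 *		*ioengine_ptr = &ioengine;
 *	}
 */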
126 struct ioengine_ops *load_ioengine(struct thread_data *td, const char *name)
128 struct ioengine_ops *ops, *ret;
131 dprint(FD_IO, "load ioengine %s\n", name);
133 strncpy(engine, name, sizeof(engine) - 1);
136 * linux libaio has alias names, so convert to what we want
138 if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
139 strcpy(engine, "libaio");
141 ops = find_ioengine(engine);
143 ops = dlopen_ioengine(td, name);
146 log_err("fio: engine %s not loadable\n", name);
151 * Check that the required methods are there.
153 if (check_engine_ops(ops))
156 ret = malloc(sizeof(*ret));
157 memcpy(ret, ops, sizeof(*ret));
164 * For cleaning up an ioengine which never made it to init().
166 void free_ioengine(struct thread_data *td)
168 dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);
170 if (td->eo && td->io_ops->options) {
171 options_free(td->io_ops->options, td->eo);
176 if (td->io_ops->dlhandle)
177 dlclose(td->io_ops->dlhandle);
183 void close_ioengine(struct thread_data *td)
185 dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);
187 if (td->io_ops->cleanup) {
188 td->io_ops->cleanup(td);
189 td->io_ops->data = NULL;
195 int td_io_prep(struct thread_data *td, struct io_u *io_u)
197 dprint_io_u(io_u, "prep");
198 fio_ro_check(td, io_u);
200 lock_file(td, io_u->file, io_u->ddir);
202 if (td->io_ops->prep) {
203 int ret = td->io_ops->prep(td, io_u);
205 dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
207 unlock_file(td, io_u->file);
214 int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
215 const struct timespec *t)
220 * For the rdma ioengine with one-sided operations (RDMA_WRITE or
221 * RDMA_READ), the server side gets a message from the client
222 * side that the task is finished, and
223 * td->done is set to 1 after td_io_commit(). In this case,
224 * there is no need to reap completion events on the server side.
229 if (min > 0 && td->io_ops->commit) {
230 r = td->io_ops->commit(td);
234 if (max > td->cur_depth)
240 if (max && td->io_ops->getevents)
241 r = td->io_ops->getevents(td, min, max, t);
245 * Reflect that our submitted requests were retrieved with
246 * whatever OS async calls are in the underlying engine.
248 td->io_u_in_flight -= r;
249 io_u_mark_complete(td, r);
251 td_verror(td, r, "get_events");
253 dprint(FD_IO, "getevents: %d\n", r);
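/*
 * A rough sketch of the engine side of the calls above, assuming the
 * common pattern used by async engines (all names here are illustrative):
 * ->getevents() reaps between min and max completions into a private
 * array, and ->event() hands back each completed io_u by index.
 *
 *	static int example_getevents(struct thread_data *td, unsigned int min,
 *				     unsigned int max, const struct timespec *t)
 *	{
 *		struct example_data *ed = td->io_ops->data;
 *
 *		// wait for at least 'min' completions (bounded by 't'),
 *		// stash up to 'max' io_u pointers in ed->events[] and
 *		// return how many were reaped, or a negative error
 *		return example_reap_completions(ed, min, max, t);
 *	}
 *
 *	static struct io_u *example_event(struct thread_data *td, int event)
 *	{
 *		struct example_data *ed = td->io_ops->data;
 *
 *		return ed->events[event];
 *	}
 */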
257 int td_io_queue(struct thread_data *td, struct io_u *io_u)
259 const enum fio_ddir ddir = acct_ddir(io_u);
260 unsigned long buflen = io_u->xfer_buflen;
263 dprint_io_u(io_u, "queue");
264 fio_ro_check(td, io_u);
266 assert((io_u->flags & IO_U_F_FLIGHT) == 0);
267 io_u_set(io_u, IO_U_F_FLIGHT);
269 assert(fio_file_open(io_u->file));
272 * If using a write iolog, store this entry.
279 if (td->io_ops->flags & FIO_SYNCIO) {
280 if (fio_fill_issue_time(td))
281 fio_gettime(&io_u->issue_time, NULL);
284 * only used for iolog
286 if (td->o.read_iolog_file)
287 memcpy(&td->last_issue, &io_u->issue_time,
288 sizeof(struct timeval));
292 td->io_issues[ddir]++;
293 td->io_issue_bytes[ddir] += buflen;
294 td->rate_io_issue_bytes[ddir] += buflen;
297 ret = td->io_ops->queue(td, io_u);
299 unlock_file(td, io_u->file);
301 if (ret == FIO_Q_BUSY && ddir_rw(ddir)) {
302 td->io_issues[ddir]--;
303 td->io_issue_bytes[ddir] -= buflen;
304 td->rate_io_issue_bytes[ddir] -= buflen;
308 * If an error was seen and the io engine didn't propagate it
309 * back to 'td', do so.
311 if (io_u->error && !td->error)
312 td_verror(td, io_u->error, "td_io_queue");
315 * Add a warning for O_DIRECT so that users have an easier time
316 * spotting potentially bad alignment. If this triggers for the first
317 * IO, then it's likely an alignment problem or because the host fs
318 * does not support O_DIRECT.
320 if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
323 log_info("fio: first direct IO errored. File system may not "
324 "support direct IO, or iomem_align= is bad. Try "
325 "setting direct=0.\n");
328 if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
329 io_u_mark_submit(td, 1);
330 io_u_mark_complete(td, 1);
333 if (ret == FIO_Q_COMPLETED) {
334 if (ddir_rw(io_u->ddir)) {
335 io_u_mark_depth(td, 1);
336 td->ts.total_io_u[io_u->ddir]++;
338 } else if (ret == FIO_Q_QUEUED) {
343 if (ddir_rw(io_u->ddir))
344 td->ts.total_io_u[io_u->ddir]++;
346 if (td->io_u_queued >= td->o.iodepth_batch) {
347 r = td_io_commit(td);
353 if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
354 if (fio_fill_issue_time(td))
355 fio_gettime(&io_u->issue_time, NULL);
358 * only used for iolog
360 if (td->o.read_iolog_file)
361 memcpy(&td->last_issue, &io_u->issue_time,
362 sizeof(struct timeval));
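/*
 * A hedged job-file sketch of how the paths above are typically hit
 * (section name and values are arbitrary): with an async engine,
 * iodepth_batch=8 makes td_io_commit() fire once 8 ios are queued, and
 * direct=1 is what makes a first-IO EINVAL trip the alignment warning.
 *
 *	[example]
 *	ioengine=libaio
 *	rw=randread
 *	direct=1
 *	iodepth=16
 *	iodepth_batch=8
 */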
368 int td_io_init(struct thread_data *td)
372 if (td->io_ops->init) {
373 ret = td->io_ops->init(td);
374 if (ret && td->o.iodepth > 1) {
375 log_err("fio: io engine init failed. Perhaps try"
376 " reducing io depth?\n");
382 if (!ret && (td->io_ops->flags & FIO_NOIO))
383 td->flags |= TD_F_NOIO;
388 int td_io_commit(struct thread_data *td)
392 dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);
394 if (!td->cur_depth || !td->io_u_queued)
397 io_u_mark_depth(td, td->io_u_queued);
399 if (td->io_ops->commit) {
400 ret = td->io_ops->commit(td);
402 td_verror(td, -ret, "io commit");
406 * Reflect that events were submitted as async IO requests.
408 td->io_u_in_flight += td->io_u_queued;
414 int td_io_open_file(struct thread_data *td, struct fio_file *f)
416 assert(!fio_file_open(f));
419 if (td->io_ops->open_file(td, f)) {
420 if (td->error == EINVAL && td->o.odirect)
421 log_err("fio: destination does not support O_DIRECT\n");
422 if (td->error == EMFILE) {
423 log_err("fio: try reducing/setting openfiles (failed"
424 " at %u of %u)\n", td->nr_open_files,
429 assert(!fio_file_open(f));
433 fio_file_reset(td, f);
434 fio_file_set_open(f);
435 fio_file_clear_closing(f);
436 disk_util_inc(f->du);
441 if (f->filetype == FIO_TYPE_PIPE) {
443 log_err("fio: can't seek on pipes (no random io)\n");
448 if (td->io_ops->flags & FIO_DISKLESSIO)
451 if (td->o.invalidate_cache && file_invalidate_cache(td, f))
454 if (td->o.fadvise_hint &&
455 (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
459 flags = POSIX_FADV_RANDOM;
461 flags = POSIX_FADV_SEQUENTIAL;
463 if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
464 td_verror(td, errno, "fadvise");
468 #ifdef FIO_HAVE_STREAMID
469 if (td->o.fadvise_stream &&
470 (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
471 off_t stream = td->o.fadvise_stream;
473 if (posix_fadvise(f->fd, stream, f->io_size, POSIX_FADV_STREAMID) < 0) {
474 td_verror(td, errno, "fadvise streamid");
480 #ifdef FIO_OS_DIRECTIO
482 * Some OSes (e.g. Solaris) have a distinct call to mark the file
483 * non-buffered, instead of using O_DIRECT.
486 int ret = fio_set_odirect(f->fd);
489 td_verror(td, ret, "fio_set_odirect");
490 log_err("fio: the file system does not seem to support direct IO\n");
497 log_file(td, f, FIO_LOG_OPEN_FILE);
500 disk_util_dec(f->du);
501 if (td->io_ops->close_file)
502 td->io_ops->close_file(td, f);
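/*
 * For reference, the FIO_OS_DIRECTIO branch above typically boils down to
 * something like the following on Solaris, where unbuffered io is toggled
 * per file descriptor with directio() instead of an O_DIRECT open flag.
 * This is a sketch of the usual wrapper shape, not necessarily fio's
 * exact implementation.
 *
 *	#include <sys/types.h>
 *	#include <sys/fcntl.h>
 *
 *	static int fio_set_odirect(int fd)
 *	{
 *		if (directio(fd, DIRECTIO_ON) < 0)
 *			return errno;
 *
 *		return 0;
 *	}
 */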
506 int td_io_close_file(struct thread_data *td, struct fio_file *f)
508 if (!fio_file_closing(f))
509 log_file(td, f, FIO_LOG_CLOSE_FILE);
512 * mark as closing, do real close when last io on it has completed
514 fio_file_set_closing(f);
516 disk_util_dec(f->du);
518 if (td->o.file_lock_mode != FILE_LOCK_NONE)
519 unlock_file_all(td, f);
521 return put_file(td, f);
524 int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
526 if (td->io_ops->unlink_file)
527 return td->io_ops->unlink_file(td, f);
529 return unlink(f->file_name);
532 int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
534 if (!td->io_ops->get_file_size)
537 return td->io_ops->get_file_size(td, f);
540 static int do_sync_file_range(const struct thread_data *td,
543 off64_t offset, nbytes;
545 offset = f->first_write;
546 nbytes = f->last_write - f->first_write;
551 return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
554 int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
558 if (io_u->ddir == DDIR_SYNC) {
559 ret = fsync(io_u->file->fd);
560 } else if (io_u->ddir == DDIR_DATASYNC) {
561 #ifdef CONFIG_FDATASYNC
562 ret = fdatasync(io_u->file->fd);
564 ret = io_u->xfer_buflen;
565 io_u->error = EINVAL;
567 } else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
568 ret = do_sync_file_range(td, io_u->file);
570 ret = io_u->xfer_buflen;
571 io_u->error = EINVAL;
580 int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
582 #ifndef FIO_HAVE_TRIM
583 io_u->error = EINVAL;
586 struct fio_file *f = io_u->file;
589 ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
591 return io_u->xfer_buflen;
598 int fio_show_ioengine_help(const char *engine)
600 struct flist_head *entry;
601 struct thread_data td;
605 if (!engine || !*engine) {
606 log_info("Available IO engines:\n");
607 flist_for_each(entry, &engine_list) {
608 td.io_ops = flist_entry(entry, struct ioengine_ops,
610 log_info("\t%s\n", td.io_ops->name);
614 sep = strchr(engine, ',');
620 memset(&td, 0, sizeof(td));
622 td.io_ops = load_ioengine(&td, engine);
624 log_info("IO engine %s not found\n", engine);
628 if (td.io_ops->options)
629 ret = show_cmd_help(td.io_ops->options, sep);
631 log_info("IO engine %s has no options\n", td.io_ops->name);