/*
 * The io parts of the fio tool, including workers for sync and mmap'ed
 * io, as well as both POSIX and Linux libaio support.
 *
 * sync io is implemented on top of aio.
 *
 * This is not really specific to fio: if the get_io_u/put_io_u helpers
 * and associated structures were pulled into this as well, it would be
 * a perfectly generic io engine that could be used for other projects.
 *
 */
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <assert.h>

#include "fio.h"
#include "diskutil.h"
static FLIST_HEAD(engine_list);
static int check_engine_ops(struct ioengine_ops *ops)
{
	if (ops->version != FIO_IOOPS_VERSION) {
		log_err("bad ioops version %d (want %d)\n", ops->version,
							FIO_IOOPS_VERSION);
		return 1;
	}

	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return 1;
	}

	/*
	 * sync engines only need a ->queue()
	 */
	if (ops->flags & FIO_SYNCIO)
		return 0;

	if (!ops->event) {
		log_err("%s: no event handler\n", ops->name);
		return 1;
	}
	if (!ops->getevents) {
		log_err("%s: no getevents handler\n", ops->name);
		return 1;
	}
	if (!ops->queue) {
		log_err("%s: no queue handler\n", ops->name);
		return 1;
	}

	return 0;
}
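/*
 * For reference, the smallest engine that passes the checks above is a
 * sync engine: it only needs a name, a matching version, FIO_SYNCIO in
 * its flags, and a ->queue() handler. A sketch (the "sketch_queue"
 * handler name is illustrative, not part of fio):
 *
 *	static struct ioengine_ops sketch_ioengine = {
 *		.name		= "sketch",
 *		.version	= FIO_IOOPS_VERSION,
 *		.flags		= FIO_SYNCIO,
 *		.queue		= sketch_queue,
 *	};
 */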
void unregister_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s unregistered\n", ops->name);
	flist_del(&ops->list);
	INIT_FLIST_HEAD(&ops->list);
}
void register_ioengine(struct ioengine_ops *ops)
{
	dprint(FD_IO, "ioengine %s registered\n", ops->name);
	INIT_FLIST_HEAD(&ops->list);
	flist_add_tail(&ops->list, &engine_list);
}
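/*
 * The bundled engines typically call register_ioengine() from a
 * constructor-style function that runs at startup; a sketch of that
 * pattern, reusing the hypothetical sketch_ioengine from above:
 *
 *	static void fio_init fio_sketch_register(void)
 *	{
 *		register_ioengine(&sketch_ioengine);
 *	}
 */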
static struct ioengine_ops *find_ioengine(const char *name)
{
	struct ioengine_ops *ops;
	struct flist_head *entry;

	flist_for_each(entry, &engine_list) {
		ops = flist_entry(entry, struct ioengine_ops, list);
		if (!strcmp(name, ops->name))
			return ops;
	}

	return NULL;
}
static struct ioengine_ops *dlopen_ioengine(struct thread_data *td,
					    const char *engine_lib)
{
	struct ioengine_ops *ops;
	void *dlhandle;

	dprint(FD_IO, "dload engine %s\n", engine_lib);

	dlhandle = dlopen(engine_lib, RTLD_LAZY);
	if (!dlhandle) {
		td_vmsg(td, -1, dlerror(), "dlopen");
		return NULL;
	}

	/*
	 * Unlike the included modules, external engines should have a
	 * non-static ioengine structure that we can reference.
	 */
	ops = dlsym(dlhandle, engine_lib);
	if (!ops)
		ops = dlsym(dlhandle, "ioengine");
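	/*
	 * In other words, a plugin shared object can export a global named
	 * "ioengine" (or named after the library itself) and it will be
	 * picked up here. A sketch of such an exported symbol (names
	 * illustrative):
	 *
	 *	struct ioengine_ops ioengine = {
	 *		.name		= "ext_sketch",
	 *		.version	= FIO_IOOPS_VERSION,
	 *		.flags		= FIO_SYNCIO,
	 *		.queue		= ext_sketch_queue,
	 *	};
	 */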
	/*
	 * For some external engines (like C++ ones) it is not that trivial
	 * to provide a non-static ioengine structure that we can reference.
	 * Instead we call a method which allocates the required ioengine
	 * structure.
	 */
	if (!ops) {
		get_ioengine_t get_ioengine = dlsym(dlhandle, "get_ioengine");

		if (get_ioengine)
			get_ioengine(&ops);
	}

	if (!ops) {
		td_vmsg(td, -1, dlerror(), "dlsym");
		dlclose(dlhandle);
		return NULL;
	}

	ops->dlhandle = dlhandle;
	return ops;
}
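/*
 * A C++ plugin exports the allocation hook looked up above instead; a
 * sketch, assuming get_ioengine_t is void (*)(struct ioengine_ops **)
 * and that "cpp_sketch_ops" is the plugin's own statically initialized
 * ops structure:
 *
 *	extern "C" void get_ioengine(struct ioengine_ops **ops)
 *	{
 *		*ops = new struct ioengine_ops(cpp_sketch_ops);
 *	}
 */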
struct ioengine_ops *load_ioengine(struct thread_data *td, const char *name)
{
	struct ioengine_ops *ops, *ret;
	char engine[16];

	dprint(FD_IO, "load ioengine %s\n", name);

	strncpy(engine, name, sizeof(engine) - 1);
	engine[sizeof(engine) - 1] = '\0';

	/*
	 * linux libaio has alias names, so convert to what we want
	 */
	if (!strncmp(engine, "linuxaio", 8) || !strncmp(engine, "aio", 3))
		strcpy(engine, "libaio");

	ops = find_ioengine(engine);
	if (!ops)
		ops = dlopen_ioengine(td, name);

	if (!ops) {
		log_err("fio: engine %s not loadable\n", name);
		return NULL;
	}

	/*
	 * Check that the required methods are there.
	 */
	if (check_engine_ops(ops))
		return NULL;

	ret = malloc(sizeof(*ret));
	memcpy(ret, ops, sizeof(*ret));
	return ret;
}
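/*
 * Note that the caller gets back a private, heap-allocated copy of the
 * ops structure rather than a pointer to the registered template, so
 * each job can safely modify its own copy.
 */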
/*
 * For cleaning up an ioengine which never made it to init().
 */
void free_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "free ioengine %s\n", td->io_ops->name);

	if (td->eo && td->io_ops->options) {
		options_free(td->io_ops->options, td->eo);
		free(td->eo);
		td->eo = NULL;
	}

	if (td->io_ops->dlhandle)
		dlclose(td->io_ops->dlhandle);

	free(td->io_ops);
	td->io_ops = NULL;
}
void close_ioengine(struct thread_data *td)
{
	dprint(FD_IO, "close ioengine %s\n", td->io_ops->name);

	if (td->io_ops->cleanup) {
		td->io_ops->cleanup(td);
		td->io_ops->data = NULL;
	}

	free_ioengine(td);
}
int td_io_prep(struct thread_data *td, struct io_u *io_u)
{
	dprint_io_u(io_u, "prep");
	fio_ro_check(td, io_u);

	lock_file(td, io_u->file, io_u->ddir);

	if (td->io_ops->prep) {
		int ret = td->io_ops->prep(td, io_u);

		dprint(FD_IO, "->prep(%p)=%d\n", io_u, ret);
		if (ret)
			unlock_file(td, io_u->file);
		return ret;
	}

	return 0;
}
int td_io_getevents(struct thread_data *td, unsigned int min, unsigned int max,
		    const struct timespec *t)
{
	int r = 0;

	/*
	 * For the rdma engine's one-sided operations (RDMA_WRITE or
	 * RDMA_READ), the server side gets a message from the client side
	 * that the task is finished, and td->done is set to 1 after
	 * td_io_commit(). In that case there is no need to reap completion
	 * events on the server side.
	 */
	if (td->done)
		return 0;

	if (min > 0 && td->io_ops->commit) {
		r = td->io_ops->commit(td);
		if (r < 0)
			goto out;
	}
	if (max > td->cur_depth)
		max = td->cur_depth;
	if (min > max)
		max = min;

	r = 0;
	if (max && td->io_ops->getevents)
		r = td->io_ops->getevents(td, min, max, t);
out:
	if (r >= 0) {
		/*
		 * Reflect that our submitted requests were retrieved with
		 * whatever OS async calls are in the underlying engine.
		 */
		td->io_u_in_flight -= r;
		io_u_mark_complete(td, r);
	} else
		td_verror(td, r, "get_events");

	dprint(FD_IO, "getevents: %d\n", r);
	return r;
}
int td_io_queue(struct thread_data *td, struct io_u *io_u)
{
	int ret;

	dprint_io_u(io_u, "queue");
	fio_ro_check(td, io_u);

	assert((io_u->flags & IO_U_F_FLIGHT) == 0);
	io_u->flags |= IO_U_F_FLIGHT;

	assert(fio_file_open(io_u->file));

	/*
	 * If using a write iolog, store this entry.
	 */
	log_io_u(td, io_u);

	io_u->error = 0;
	io_u->resid = 0;

	if (td->io_ops->flags & FIO_SYNCIO) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(struct timeval));
	}

	if (ddir_rw(acct_ddir(io_u))) {
		td->io_issues[acct_ddir(io_u)]++;
		td->io_issue_bytes[acct_ddir(io_u)] += io_u->xfer_buflen;
	}

	ret = td->io_ops->queue(td, io_u);

	unlock_file(td, io_u->file);

	if (ret == FIO_Q_BUSY && ddir_rw(acct_ddir(io_u))) {
		td->io_issues[acct_ddir(io_u)]--;
		td->io_issue_bytes[acct_ddir(io_u)] -= io_u->xfer_buflen;
	}

	/*
	 * If an error was seen and the io engine didn't propagate it
	 * back to 'td', do so.
	 */
	if (io_u->error && !td->error)
		td_verror(td, io_u->error, "td_io_queue");

	/*
	 * Add a warning for O_DIRECT so that users have an easier time
	 * spotting potentially bad alignment. If this triggers for the
	 * first IO, then it's likely an alignment problem, or because
	 * the host file system does not support O_DIRECT.
	 */
	if (io_u->error == EINVAL && td->io_issues[io_u->ddir & 1] == 1 &&
	    td->o.odirect) {
		log_info("fio: first direct IO errored. File system may not "
			 "support direct IO, or iomem_align= is bad.\n");
	}

	if (!td->io_ops->commit || io_u->ddir == DDIR_TRIM) {
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
	}
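	/*
	 * ->queue() returns one of three values: FIO_Q_COMPLETED means the
	 * io was done inline and can be accounted right away, FIO_Q_QUEUED
	 * means it was deferred and will be reaped through ->getevents(),
	 * and FIO_Q_BUSY means the engine is full and the io must be
	 * retried later.
	 */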
	if (ret == FIO_Q_COMPLETED) {
		if (ddir_rw(io_u->ddir)) {
			io_u_mark_depth(td, 1);
			td->ts.total_io_u[io_u->ddir]++;
		}
	} else if (ret == FIO_Q_QUEUED) {
		int r;

		if (ddir_rw(io_u->ddir)) {
			td->io_u_queued++;
			td->ts.total_io_u[io_u->ddir]++;
		}

		if (td->io_u_queued >= td->o.iodepth_batch) {
			r = td_io_commit(td);
			if (r < 0)
				return r;
		}
	}

	if ((td->io_ops->flags & FIO_SYNCIO) == 0) {
		if (fio_fill_issue_time(td))
			fio_gettime(&io_u->issue_time, NULL);

		/*
		 * only used for iolog
		 */
		if (td->o.read_iolog_file)
			memcpy(&td->last_issue, &io_u->issue_time,
					sizeof(struct timeval));
	}

	return ret;
}
int td_io_init(struct thread_data *td)
{
	int ret = 0;

	if (td->io_ops->init) {
		ret = td->io_ops->init(td);
		if (ret && td->o.iodepth > 1) {
			log_err("fio: io engine init failed. Perhaps try"
				" reducing io depth?\n");
		}
		if (!td->error)
			td->error = ret;
	}

	if (!ret && (td->io_ops->flags & FIO_NOIO))
		td->flags |= TD_F_NOIO;

	return ret;
}
int td_io_commit(struct thread_data *td)
{
	int ret;

	dprint(FD_IO, "calling ->commit(), depth %d\n", td->cur_depth);

	if (!td->cur_depth || !td->io_u_queued)
		return 0;

	io_u_mark_depth(td, td->io_u_queued);

	if (td->io_ops->commit) {
		ret = td->io_ops->commit(td);
		if (ret)
			td_verror(td, -ret, "io commit");
	}

	/*
	 * Reflect that events were submitted as async IO requests.
	 */
	td->io_u_in_flight += td->io_u_queued;
	td->io_u_queued = 0;

	return 0;
}
int td_io_open_file(struct thread_data *td, struct fio_file *f)
{
	assert(!fio_file_open(f));
	assert(f->fd == -1);

	if (td->io_ops->open_file(td, f)) {
		if (td->error == EINVAL && td->o.odirect)
			log_err("fio: destination does not support O_DIRECT\n");
		if (td->error == EMFILE) {
			log_err("fio: try reducing/setting openfiles (failed"
				" at %u of %u)\n", td->nr_open_files,
							td->o.open_files);
		}

		assert(f->fd == -1);
		assert(!fio_file_open(f));
		return 1;
	}

	fio_file_reset(td, f);
	fio_file_set_open(f);
	fio_file_clear_closing(f);
	disk_util_inc(f->du);

	td->nr_open_files++;
	get_file(f);

	if (f->filetype == FIO_TYPE_PIPE) {
		if (td_random(td)) {
			log_err("fio: can't seek on pipes (no random io)\n");
			goto err;
		}
	}

	if (td->io_ops->flags & FIO_DISKLESSIO)
		goto done;

	if (td->o.invalidate_cache && file_invalidate_cache(td, f))
		goto err;

	if (td->o.fadvise_hint &&
	    (f->filetype == FIO_TYPE_BD || f->filetype == FIO_TYPE_FILE)) {
		int flags;

		if (td_random(td))
			flags = POSIX_FADV_RANDOM;
		else
			flags = POSIX_FADV_SEQUENTIAL;

		if (posix_fadvise(f->fd, f->file_offset, f->io_size, flags) < 0) {
			td_verror(td, errno, "fadvise");
			goto err;
		}
	}

#ifdef FIO_OS_DIRECTIO
	/*
	 * Some OSes have a distinct call to mark the file non-buffered,
	 * instead of using O_DIRECT (e.g. Solaris).
	 */
	if (td->o.odirect) {
		int ret = fio_set_odirect(f->fd);

		if (ret) {
			td_verror(td, ret, "fio_set_odirect");
			log_err("fio: the file system does not seem to support direct IO\n");
			goto err;
		}
	}
#endif

done:
	log_file(td, f, FIO_LOG_OPEN_FILE);
	return 0;
err:
	disk_util_dec(f->du);
	if (td->io_ops->close_file)
		td->io_ops->close_file(td, f);
	return 1;
}
int td_io_close_file(struct thread_data *td, struct fio_file *f)
{
	if (!fio_file_closing(f))
		log_file(td, f, FIO_LOG_CLOSE_FILE);

	/*
	 * mark as closing, do the real close when the last io on it has
	 * completed
	 */
	fio_file_set_closing(f);

	disk_util_dec(f->du);

	if (td->o.file_lock_mode != FILE_LOCK_NONE)
		unlock_file_all(td, f);

	return put_file(td, f);
}
int td_io_unlink_file(struct thread_data *td, struct fio_file *f)
{
	if (td->io_ops->unlink_file)
		return td->io_ops->unlink_file(td, f);
	else
		return unlink(f->file_name);
}
int td_io_get_file_size(struct thread_data *td, struct fio_file *f)
{
	if (!td->io_ops->get_file_size)
		return 0;

	return td->io_ops->get_file_size(td, f);
}
static int do_sync_file_range(const struct thread_data *td,
			      struct fio_file *f)
{
	off64_t offset, nbytes;

	offset = f->first_write;
	nbytes = f->last_write - f->first_write;

	if (!nbytes)
		return 0;

	return sync_file_range(f->fd, offset, nbytes, td->o.sync_file_range);
}
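/*
 * td->o.sync_file_range holds the SYNC_FILE_RANGE_* flags picked by the
 * sync_file_range= job option (wait_before, write, wait_after), passed
 * through to the system call unchanged.
 */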
int do_io_u_sync(const struct thread_data *td, struct io_u *io_u)
{
	int ret;

	if (io_u->ddir == DDIR_SYNC) {
		ret = fsync(io_u->file->fd);
	} else if (io_u->ddir == DDIR_DATASYNC) {
#ifdef CONFIG_FDATASYNC
		ret = fdatasync(io_u->file->fd);
#else
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
#endif
	} else if (io_u->ddir == DDIR_SYNC_FILE_RANGE)
		ret = do_sync_file_range(td, io_u->file);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}

	if (ret < 0)
		io_u->error = errno;

	return ret;
}
int do_io_u_trim(const struct thread_data *td, struct io_u *io_u)
{
#ifndef FIO_HAVE_TRIM
	io_u->error = EINVAL;
	return 0;
#else
	struct fio_file *f = io_u->file;
	int ret;

	ret = os_trim(f->fd, io_u->offset, io_u->xfer_buflen);
	if (!ret)
		return io_u->xfer_buflen;

	io_u->error = ret;
	return 0;
#endif
}
int fio_show_ioengine_help(const char *engine)
{
	struct flist_head *entry;
	struct thread_data td;
	char *sep;
	int ret = 1;

	if (!engine || !*engine) {
		log_info("Available IO engines:\n");
		flist_for_each(entry, &engine_list) {
			td.io_ops = flist_entry(entry, struct ioengine_ops,
						list);
			log_info("\t%s\n", td.io_ops->name);
		}
		return 0;
	}
	sep = strchr(engine, ',');
	if (sep) {
		*sep = 0;
		sep++;
	}

	memset(&td, 0, sizeof(td));

	td.io_ops = load_ioengine(&td, engine);
	if (!td.io_ops) {
		log_info("IO engine %s not found\n", engine);
		return 1;
	}

	if (td.io_ops->options)
		ret = show_cmd_help(td.io_ops->options, sep);
	else
		log_info("IO engine %s has no options\n", td.io_ops->name);

	free(td.io_ops);
	return ret;
}
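/*
 * This backs the --enghelp command line option: plain "--enghelp" lists
 * the available engines, "--enghelp=engine" shows that engine's options,
 * and "--enghelp=engine,command" describes a single option.
 */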