/*
 * FIO engines for DDN's Infinite Memory Engine.
 * This file defines 3 engines: ime_psync, ime_psyncv, and ime_aio
 *
 * Copyright (C) 2018 DataDirect Networks. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License,
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 * Some details about the new engines are given below:
 *
 * [ime_psync]
 * Most basic engine that issues calls to ime_native whenever an IO is queued.
 *
 * [ime_psyncv]
 * This engine tries to queue the IOs (by creating iovecs) if asked by FIO (via
 * iodepth_batch). It refuses to queue when the iovecs can't be appended, and
 * waits for FIO to issue a commit. After a call to commit and get_events, new
 * IOs can be queued.
 *
 * [ime_aio]
 * This engine tries to queue the IOs (by creating iovecs) if asked by FIO (via
 * iodepth_batch). When the iovecs can't be appended to the current request, a
 * new request for IME is created. These requests will be issued to IME when
 * commit is called. Contrary to ime_psyncv, there can be several requests at
 * once. We don't need to wait for a request to terminate before creating a new
 * one.
 */
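
/*
 * Illustrative sketch (not part of this engine): fio drives the batched
 * engines through its usual queue/commit/getevents contract. The loop
 * below is fio's own, simplified here for orientation only:
 *
 *	switch (td->io_ops->queue(td, io_u)) {
 *	case FIO_Q_QUEUED:		// iovec appended, not issued yet
 *		break;
 *	case FIO_Q_BUSY:		// batch full or not appendable:
 *		td->io_ops->commit(td);	// issue the pending request(s), then
 *		td->io_ops->getevents(td, 1, td->o.iodepth, NULL);
 *		break;			// reap before queueing new IOs
 *	case FIO_Q_COMPLETED:		// e.g. DDIR_SYNC handled inline
 *		break;
 *	}
 */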
#include <errno.h>
#include <linux/limits.h>
#include <ime_native.h>

#include "../fio.h"
/**************************************************************
 * Types and constants definitions
 *
 **************************************************************/
/* define constants for async IOs */
#define FIO_IME_IN_PROGRESS	-1
#define FIO_IME_REQ_ERROR	-2
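
/* A request's status field (see struct imeaio_req below) holds the number
   of transferred bytes once the completion callback has fired, or one of
   the sentinels above: FIO_IME_IN_PROGRESS while the request is in flight,
   FIO_IME_REQ_ERROR if it failed. */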
/* This flag is used when some jobs were created using threads. In that
   case, IME can't be finalized in the engine-specific cleanup function,
   because other threads might still use IME. Instead, IME is finalized
   in the destructor (see fio_ime_unregister), only when the flag
   fio_ime_is_initialized is true (which means at least one thread has
   initialized IME). */
static bool fio_ime_is_initialized = false;
struct imesio_req {
	int			fd;		/* File descriptor */
	enum fio_ddir		ddir;		/* Type of IO (read or write) */
	off_t			offset;		/* File offset */
};
struct imeaio_req {
	struct ime_aiocb	iocb;		/* IME aio request */
	ssize_t			status;		/* Status of the IME request */
	enum fio_ddir		ddir;		/* Type of IO (read or write) */
	pthread_cond_t		cond_endio;	/* Condition var to notify FIO */
	pthread_mutex_t		status_mutex;	/* Mutex for cond_endio */
};
/* This structure will be used for 2 engines: ime_psyncv and ime_aio */
struct ime_data {
	union {
		struct imeaio_req *aioreqs;	/* array of aio requests */
		struct imesio_req *sioreq;	/* pointer to the only syncio request */
	};
	struct iovec	*iovecs;	/* array of queued iovecs */
	struct io_u	**io_us;	/* array of queued io_u pointers */
	struct io_u	**event_io_us;	/* array of the events retrieved after get_events */
	unsigned int	queued;		/* number of iovecs/io_us in the queue */
	unsigned int	events;		/* number of committed iovecs/io_us */

	/* variables used to implement a "ring" queue */
	unsigned int	depth;		/* max entries in the queue */
	unsigned int	head;		/* index used to append */
	unsigned int	tail;		/* index used to pop */
	unsigned int	cur_commit;	/* index of the first uncommitted req */

	/* offset used by the last iovec (used to check if the iovecs can be appended) */
	unsigned long long	last_offset;

	/* The variables below are used for aio only */
	struct imeaio_req *last_req;	/* last request awaiting commit */
};
/**************************************************************
 * Private functions for queueing/unqueueing
 *
 **************************************************************/
static void fio_ime_queue_incr (struct ime_data *ime_d)
{
	ime_d->head = (ime_d->head + 1) % ime_d->depth;
	ime_d->queued++;
}

static void fio_ime_queue_red (struct ime_data *ime_d)
{
	ime_d->tail = (ime_d->tail + 1) % ime_d->depth;
	ime_d->queued--;
	ime_d->events--;
}

static void fio_ime_queue_commit (struct ime_data *ime_d, int iovcnt)
{
	ime_d->cur_commit = (ime_d->cur_commit + iovcnt) % ime_d->depth;
	ime_d->events += iovcnt;
}

static void fio_ime_queue_reset (struct ime_data *ime_d)
{
	ime_d->head = 0;
	ime_d->tail = 0;
	ime_d->queued = 0;
	ime_d->events = 0;
	ime_d->cur_commit = 0;
}
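
/* Worked example of the ring indices (illustrative values): with
   depth = 4, queueing three IOs advances head 0 -> 3 (queued == 3);
   committing a 2-iovec request moves cur_commit to 2 (events == 2);
   reaping those two completions advances tail 0 -> 2 and leaves
   queued == 1, events == 0. All indices wrap modulo depth, so the
   fourth increment of head lands back on slot 0. */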
/**************************************************************
 * General IME functions
 * (needed for both sync and async IOs)
 **************************************************************/
static char *fio_set_ime_filename(char *filename)
{
	static __thread char ime_filename[PATH_MAX];
	int ret;

	ret = snprintf(ime_filename, PATH_MAX, "%s%s", DEFAULT_IME_FILE_PREFIX, filename);
	if (ret <= 0 || ret >= PATH_MAX)
		return NULL;

	return ime_filename;
}
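
/* Illustrative mapping, assuming the prefix is defined as "ime://"
   earlier in this file: a job filename "/dir/testfile" comes out as
   "ime:///dir/testfile", the form expected by the ime_native_* calls. */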
static int fio_ime_get_file_size(struct thread_data *td, struct fio_file *f)
{
	struct stat buf;
	int ret;
	char *ime_filename;

	dprint(FD_FILE, "get file size %s\n", f->file_name);

	ime_filename = fio_set_ime_filename(f->file_name);
	if (ime_filename == NULL)
		return 1;

	ret = ime_native_stat(ime_filename, &buf);
	if (ret == -1) {
		td_verror(td, errno, "fstat");
		return 1;
	}

	f->real_file_size = buf.st_size;
	return 0;
}
/* This function mimics the generic_file_open function, but issues
   IME native calls instead of POSIX calls. */
static int fio_ime_open_file(struct thread_data *td, struct fio_file *f)
{
	int flags = 0;
	int ret;
	uint64_t desired_fs;
	char *ime_filename;

	dprint(FD_FILE, "fd open %s\n", f->file_name);

	if (td_trim(td)) {
		td_verror(td, EINVAL, "IME does not support TRIM operation");
		return 1;
	}

	if (td->o.odirect)
		flags |= O_DIRECT;
	flags |= td->o.sync_io;
	if (td->o.create_on_open && td->o.allow_create)
		flags |= O_CREAT;

	if (td_write(td)) {
		if (!read_only)
			flags |= O_RDWR;

		if (td->o.allow_create)
			flags |= O_CREAT;
	} else if (td_read(td)) {
		flags |= O_RDONLY;
	} else {
		/* We should never go here. */
		td_verror(td, EINVAL, "Unsupported open mode");
		return 1;
	}

	ime_filename = fio_set_ime_filename(f->file_name);
	if (ime_filename == NULL)
		return 1;
	f->fd = ime_native_open(ime_filename, flags, 0600);
	if (f->fd == -1) {
		char buf[FIO_VERROR_SIZE];
		int __e = errno;

		snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
		td_verror(td, __e, buf);
		return 1;
	}

	/* Now we need to make sure the real file size is sufficient for FIO
	   to do its things. This is normally done before the file open function
	   is called, but because FIO would use POSIX calls, we need to do it
	   ourselves. */
	ret = fio_ime_get_file_size(td, f);
	if (ret < 0) {
		ime_native_close(f->fd);
		td_verror(td, errno, "ime_get_file_size");
		return 1;
	}

	desired_fs = f->io_size + f->file_offset;
	if (td_write(td)) {
		dprint(FD_FILE, "Laying out file %s%s\n",
			DEFAULT_IME_FILE_PREFIX, f->file_name);
		if (!td->o.create_on_open &&
		    f->real_file_size < desired_fs &&
		    ime_native_ftruncate(f->fd, desired_fs) < 0) {
			ime_native_close(f->fd);
			td_verror(td, errno, "ime_native_ftruncate");
			return 1;
		}
		if (f->real_file_size < desired_fs)
			f->real_file_size = desired_fs;
	} else if (td_read(td) && f->real_file_size < desired_fs) {
		ime_native_close(f->fd);
		log_err("error: can't read %lu bytes from file with "
			"%lu bytes\n", desired_fs, f->real_file_size);
		return 1;
	}

	return 0;
}
static int fio_ime_close_file(struct thread_data fio_unused *td, struct fio_file *f)
{
	int ret = 0;

	dprint(FD_FILE, "fd close %s\n", f->file_name);

	if (ime_native_close(f->fd) < 0)
		ret = errno;

	f->fd = -1;
	return ret;
}
static int fio_ime_unlink_file(struct thread_data *td, struct fio_file *f)
{
	char *ime_filename = fio_set_ime_filename(f->file_name);
	int ret;

	if (ime_filename == NULL)
		return 1;

	ret = unlink(ime_filename);
	return ret < 0 ? errno : 0;
}
static struct io_u *fio_ime_event(struct thread_data *td, int event)
{
	struct ime_data *ime_d = td->io_ops_data;

	return ime_d->event_io_us[event];
}
/* Setup function used to replace get_file_size when setting up the files.
   Instead we will set real_file_size to 0 for each file. This way we
   can avoid calling ime_native_init before the forks are created. */
static int fio_ime_setup(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		dprint(FD_FILE, "setup: set file size to 0 for %p/%d/%s\n",
			f, i, f->file_name);
		f->real_file_size = 0;
	}

	return 0;
}
static int fio_ime_engine_init(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_IO, "ime engine init\n");
	if (fio_ime_is_initialized && !td->o.use_thread) {
		log_err("Warning: something might go wrong. Not all threads/forks were"
			" created before the FIO jobs were initialized.\n");
	}

	ime_native_init();
	fio_ime_is_initialized = true;

	/* We have to temporarily set real_file_size so that
	   FIO can initialize properly. It will be corrected
	   by the file open function. */
	for_each_file(td, f, i)
		f->real_file_size = f->io_size + f->file_offset;

	return 0;
}
static void fio_ime_engine_finalize(struct thread_data *td)
{
	/* Only finalize IME when using forks */
	if (!td->o.use_thread) {
		if (ime_native_finalize() < 0)
			log_err("error in ime_native_finalize\n");
		fio_ime_is_initialized = false;
	}
}
/**************************************************************
 * Private functions for blocking IOs
 *
 **************************************************************/
/* Notice: this function comes from the sync engine */
/* It is used by the commit function to return a proper code and fill
   some attributes in the io_u used for the IO. */
static int fio_ime_psync_end(struct thread_data *td, struct io_u *io_u, ssize_t ret)
{
	if (ret != (ssize_t) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}
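
/* For instance (illustrative values): an 8192-byte read that returns
   4096 completes with resid == 4096 and error == 0, and fio accounts
   the short transfer itself; a negative return instead lands in
   io_u->error via errno. */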
static enum fio_q_status fio_ime_psync_queue(struct thread_data *td,
					     struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	ssize_t ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = ime_native_pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = ime_native_pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_SYNC)
		ret = ime_native_fsync(f->fd);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}

	return fio_ime_psync_end(td, io_u, ret);
}
/**************************************************************
 * Private functions for blocking IOs (with iovecs)
 *
 **************************************************************/
static bool fio_ime_psyncv_can_queue(struct ime_data *ime_d, struct io_u *io_u)
{
	/* We can only queue if:
	   - There are no queued iovecs
	   - Or if there is at least one:
	     - There must be no event waiting for retrieval
	     - The offsets must be contiguous
	     - The ddir and fd must be the same */
	return (ime_d->queued == 0 || (
		ime_d->events == 0 &&
		ime_d->last_offset == io_u->offset &&
		ime_d->sioreq->ddir == io_u->ddir &&
		ime_d->sioreq->fd == io_u->file->fd));
}
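
/* Example (illustrative): two 4 KiB writes to the same fd at offsets 0
   and 4096 pass this check and coalesce into one iovec batch; a read, a
   different fd, or an offset that is not exactly last_offset makes the
   check fail, and the caller answers FIO_Q_BUSY until commit and
   get_events have drained the batch. */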
/* Before using this function, we should have already
   ensured that the queue is not full */
static void fio_ime_psyncv_enqueue(struct ime_data *ime_d, struct io_u *io_u)
{
	struct imesio_req *ioreq = ime_d->sioreq;
	struct iovec *iov = &ime_d->iovecs[ime_d->head];

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (ime_d->queued == 0) {
		ioreq->offset = io_u->offset;
		ioreq->ddir = io_u->ddir;
		ioreq->fd = io_u->file->fd;
	}

	ime_d->io_us[ime_d->head] = io_u;
	ime_d->last_offset = io_u->offset + io_u->xfer_buflen;
	fio_ime_queue_incr(ime_d);
}
/* Tries to queue an IO. It will fail if the IO can't be appended to the
   current request or if the current request has been committed but not
   yet retrieved by get_events. */
static enum fio_q_status fio_ime_psyncv_queue(struct thread_data *td,
					      struct io_u *io_u)
{
	struct ime_data *ime_d = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (ime_d->queued == ime_d->depth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (!fio_ime_psyncv_can_queue(ime_d, io_u))
			return FIO_Q_BUSY;

		dprint(FD_IO, "queue: ddir=%d at %u commit=%u queued=%u events=%u\n",
			io_u->ddir, ime_d->head, ime_d->cur_commit,
			ime_d->queued, ime_d->events);
		fio_ime_psyncv_enqueue(ime_d, io_u);
		return FIO_Q_QUEUED;
	} else if (io_u->ddir == DDIR_SYNC) {
		if (ime_native_fsync(io_u->file->fd) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "fsync");
		}
		return FIO_Q_COMPLETED;
	} else {
		io_u->error = EINVAL;
		td_verror(td, io_u->error, "wrong ddir");
		return FIO_Q_COMPLETED;
	}
}
/* Notice: this function comes from the sync engine */
/* It is used by the commit function to return a proper code and fill
   some attributes in the io_us appended to the current request. */
static int fio_ime_psyncv_end(struct thread_data *td, ssize_t bytes)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct io_u *io_u;
	int i;
	int err = 0;

	for (i = 0; i < ime_d->queued; i++) {
		io_u = ime_d->io_us[i];

		if (bytes == -1)
			err = errno;
		else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			bytes -= this_io;
		}
	}

	if (err) {
		td_verror(td, err, "xfer psyncv");
		return -err;
	}

	return 0;
}
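
/* Worked example (illustrative values): a pwritev of two 8192-byte
   iovecs that returns 12288 leaves the first io_u with resid == 0 and
   the second with resid == 4096; any io_u beyond the short transfer
   keeps resid == xfer_buflen. */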
/* Commits the current request by calling ime_native (with one or several
   iovecs). After this commit, the corresponding events (one per iovec)
   can be retrieved by get_events. */
static int fio_ime_psyncv_commit(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imesio_req *ioreq;
	int ret = 0;

	/* Exit if there are no (new) events to commit
	   or if the previously committed events haven't been retrieved yet */
	if (!ime_d->queued || ime_d->events)
		return 0;

	ioreq = ime_d->sioreq;
	ime_d->events = ime_d->queued;
	if (ioreq->ddir == DDIR_READ)
		ret = ime_native_preadv(ioreq->fd, ime_d->iovecs, ime_d->queued, ioreq->offset);
	else
		ret = ime_native_pwritev(ioreq->fd, ime_d->iovecs, ime_d->queued, ioreq->offset);

	dprint(FD_IO, "committed %d iovecs\n", ime_d->queued);

	return fio_ime_psyncv_end(td, ret);
}
static int fio_ime_psyncv_getevents(struct thread_data *td, unsigned int min,
				    unsigned int max, const struct timespec *t)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct io_u *io_u;
	int events = 0;
	unsigned int count;

	if (ime_d->events) {
		for (count = 0; count < ime_d->events; count++) {
			io_u = ime_d->io_us[count];
			ime_d->event_io_us[events] = io_u;
			events++;
		}
		fio_ime_queue_reset(ime_d);
	}

	dprint(FD_IO, "getevents(%u,%u) ret=%d queued=%u events=%u\n",
		min, max, events, ime_d->queued, ime_d->events);
	return events;
}
static int fio_ime_psyncv_init(struct thread_data *td)
{
	struct ime_data *ime_d;

	if (fio_ime_engine_init(td) < 0)
		return 1;

	ime_d = calloc(1, sizeof(*ime_d));

	ime_d->sioreq = malloc(sizeof(struct imesio_req));
	ime_d->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	ime_d->io_us = malloc(2 * td->o.iodepth * sizeof(struct io_u *));
	ime_d->event_io_us = ime_d->io_us + td->o.iodepth;

	ime_d->depth = td->o.iodepth;

	td->io_ops_data = ime_d;
	return 0;
}
static void fio_ime_psyncv_clean(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;

	if (ime_d) {
		free(ime_d->sioreq);
		free(ime_d->iovecs);
		free(ime_d->io_us);
		free(ime_d);
		td->io_ops_data = NULL;
	}

	fio_ime_engine_finalize(td);
}
/**************************************************************
 * Private functions for non-blocking IOs
 *
 **************************************************************/
void fio_ime_aio_complete_cb (struct ime_aiocb *aiocb, int err,
			      ssize_t bytes)
{
	struct imeaio_req *ioreq = (struct imeaio_req *) aiocb->user_context;

	pthread_mutex_lock(&ioreq->status_mutex);
	ioreq->status = err == 0 ? bytes : FIO_IME_REQ_ERROR;
	pthread_mutex_unlock(&ioreq->status_mutex);

	pthread_cond_signal(&ioreq->cond_endio);
}
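
/* Note on the pattern above: status is updated under status_mutex and
   the waiter in fio_ime_aio_getevents() re-checks it in a while loop,
   so signalling cond_endio after the unlock cannot lose a wakeup, and
   spurious wakeups are harmless. */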
static bool fio_ime_aio_can_queue (struct ime_data *ime_d, struct io_u *io_u)
{
	/* So far we can queue in any case. */
	return true;
}
static bool fio_ime_aio_can_append (struct ime_data *ime_d, struct io_u *io_u)
{
	/* We can only append if:
	   - The iovecs will be contiguous in the array
	   - There is already a queued iovec
	   - The offsets are contiguous
	   - The ddir and fd are the same */
	return (ime_d->head != 0 &&
		ime_d->queued - ime_d->events > 0 &&
		ime_d->last_offset == io_u->offset &&
		ime_d->last_req->ddir == io_u->ddir &&
		ime_d->last_req->iocb.fd == io_u->file->fd);
}
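
/* Example (illustrative): during a sequential write, each new io_u that
   passes this check only bumps last_req->iocb.iovcnt; the first
   non-contiguous offset, change of ddir or fd, or wrap of the iovec
   array (head == 0) makes fio_ime_aio_enqueue() start a fresh
   imeaio_req instead. */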
/* Before using this function, we should have already
   ensured that the queue is not full */
static void fio_ime_aio_enqueue(struct ime_data *ime_d, struct io_u *io_u)
{
	struct imeaio_req *ioreq = &ime_d->aioreqs[ime_d->head];
	struct ime_aiocb *iocb = &ioreq->iocb;
	struct iovec *iov = &ime_d->iovecs[ime_d->head];

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (fio_ime_aio_can_append(ime_d, io_u))
		ime_d->last_req->iocb.iovcnt++;
	else {
		ioreq->status = FIO_IME_IN_PROGRESS;
		ioreq->ddir = io_u->ddir;
		ime_d->last_req = ioreq;

		iocb->complete_cb = &fio_ime_aio_complete_cb;
		iocb->fd = io_u->file->fd;
		iocb->file_offset = io_u->offset;
		iocb->iov = iov;
		iocb->iovcnt = 1;

		iocb->user_context = (intptr_t) ioreq;
	}

	ime_d->io_us[ime_d->head] = io_u;
	ime_d->last_offset = io_u->offset + io_u->xfer_buflen;
	fio_ime_queue_incr(ime_d);
}
/* Tries to queue an IO. It will create a new request if the IO can't be
   appended to the current request. It will fail if the queue can't contain
   any more io_u/iovec. In this case, commit and then get_events need to be
   called. */
static enum fio_q_status fio_ime_aio_queue(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ime_data *ime_d = td->io_ops_data;

	fio_ro_check(td, io_u);

	dprint(FD_IO, "queue: ddir=%d at %u commit=%u queued=%u events=%u\n",
		io_u->ddir, ime_d->head, ime_d->cur_commit,
		ime_d->queued, ime_d->events);

	if (ime_d->queued == ime_d->depth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (!fio_ime_aio_can_queue(ime_d, io_u))
			return FIO_Q_BUSY;

		fio_ime_aio_enqueue(ime_d, io_u);
		return FIO_Q_QUEUED;
	} else if (io_u->ddir == DDIR_SYNC) {
		if (ime_native_fsync(io_u->file->fd) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "fsync");
		}
		return FIO_Q_COMPLETED;
	} else {
		io_u->error = EINVAL;
		td_verror(td, io_u->error, "wrong ddir");
		return FIO_Q_COMPLETED;
	}
}
static int fio_ime_aio_commit(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imeaio_req *ioreq;
	int ret = 0;

	/* Loop while there are events to commit */
	while (ime_d->queued - ime_d->events) {
		ioreq = &ime_d->aioreqs[ime_d->cur_commit];
		if (ioreq->ddir == DDIR_READ)
			ret = ime_native_aio_read(&ioreq->iocb);
		else
			ret = ime_native_aio_write(&ioreq->iocb);

		fio_ime_queue_commit(ime_d, ioreq->iocb.iovcnt);

		/* fio needs a negative error code */
		if (ret < 0) {
			ioreq->status = FIO_IME_REQ_ERROR;
			return -errno;
		}

		io_u_mark_submit(td, ioreq->iocb.iovcnt);
		dprint(FD_IO, "committed %d iovecs commit=%u queued=%u events=%u\n",
			ioreq->iocb.iovcnt, ime_d->cur_commit,
			ime_d->queued, ime_d->events);
	}

	return 0;
}
static int fio_ime_aio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max, const struct timespec *t)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imeaio_req *ioreq;
	struct io_u *io_u;
	int events = 0;
	unsigned int count;
	ssize_t bytes;

	while (ime_d->events) {
		ioreq = &ime_d->aioreqs[ime_d->tail];

		/* Break if we already got events, and if we will
		   exceed max if we append the next events */
		if (events && events + ioreq->iocb.iovcnt > max)
			break;

		if (ioreq->status != FIO_IME_IN_PROGRESS) {
			bytes = ioreq->status;
			for (count = 0; count < ioreq->iocb.iovcnt; count++) {
				io_u = ime_d->io_us[ime_d->tail];
				ime_d->event_io_us[events] = io_u;
				events++;
				fio_ime_queue_red(ime_d);

				if (ioreq->status == FIO_IME_REQ_ERROR)
					io_u->error = EIO;
				else {
					io_u->resid = bytes > io_u->xfer_buflen ?
							0 : io_u->xfer_buflen - bytes;
					bytes -= io_u->xfer_buflen - io_u->resid;
				}
			}
		} else {
			pthread_mutex_lock(&ioreq->status_mutex);
			while (ioreq->status == FIO_IME_IN_PROGRESS)
				pthread_cond_wait(&ioreq->cond_endio, &ioreq->status_mutex);
			pthread_mutex_unlock(&ioreq->status_mutex);
		}
	}

	dprint(FD_IO, "getevents(%u,%u) ret=%d queued=%u events=%u\n", min, max,
		events, ime_d->queued, ime_d->events);
	return events;
}
static int fio_ime_aio_init(struct thread_data *td)
{
	struct ime_data *ime_d;
	struct imeaio_req *ioreq;
	unsigned int i;

	if (fio_ime_engine_init(td) < 0)
		return 1;

	ime_d = calloc(1, sizeof(*ime_d));

	ime_d->aioreqs = malloc(td->o.iodepth * sizeof(struct imeaio_req));
	ime_d->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	ime_d->io_us = malloc(2 * td->o.iodepth * sizeof(struct io_u *));
	ime_d->event_io_us = ime_d->io_us + td->o.iodepth;

	ime_d->depth = td->o.iodepth;
	for (i = 0; i < ime_d->depth; i++) {
		ioreq = &ime_d->aioreqs[i];
		pthread_cond_init(&ioreq->cond_endio, NULL);
		pthread_mutex_init(&ioreq->status_mutex, NULL);
	}

	td->io_ops_data = ime_d;
	return 0;
}
static void fio_ime_aio_clean(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imeaio_req *ioreq;
	unsigned int i;

	if (ime_d) {
		for (i = 0; i < ime_d->depth; i++) {
			ioreq = &ime_d->aioreqs[i];
			pthread_cond_destroy(&ioreq->cond_endio);
			pthread_mutex_destroy(&ioreq->status_mutex);
		}
		free(ime_d->aioreqs);
		free(ime_d->iovecs);
		free(ime_d->io_us);
		free(ime_d);
		td->io_ops_data = NULL;
	}

	fio_ime_engine_finalize(td);
}
/**************************************************************
 * IO engines definitions
 *
 **************************************************************/

/* The FIO_DISKLESSIO flag used for these engines is necessary to prevent
   FIO from using POSIX calls. See fio_ime_open_file for more details. */
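
/* Example invocation (illustrative; the mount point and sizes are made
   up): assuming IME is initialized on the node, a batched sequential
   write through ime_psyncv could be driven with:

	fio --name=ime-test --ioengine=ime_psyncv --rw=write --bs=128k \
	    --size=1g --iodepth=16 --iodepth_batch=16 --filename=/ime/testfile

   ime_psync and ime_aio are selected the same way via --ioengine. */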
static struct ioengine_ops ioengine_prw = {
	.name		= "ime_psync",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_ime_setup,
	.init		= fio_ime_engine_init,
	.cleanup	= fio_ime_engine_finalize,
	.queue		= fio_ime_psync_queue,
	.open_file	= fio_ime_open_file,
	.close_file	= fio_ime_close_file,
	.get_file_size	= fio_ime_get_file_size,
	.unlink_file	= fio_ime_unlink_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO,
};
static struct ioengine_ops ioengine_pvrw = {
	.name		= "ime_psyncv",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_ime_setup,
	.init		= fio_ime_psyncv_init,
	.cleanup	= fio_ime_psyncv_clean,
	.queue		= fio_ime_psyncv_queue,
	.commit		= fio_ime_psyncv_commit,
	.getevents	= fio_ime_psyncv_getevents,
	.event		= fio_ime_event,
	.open_file	= fio_ime_open_file,
	.close_file	= fio_ime_close_file,
	.get_file_size	= fio_ime_get_file_size,
	.unlink_file	= fio_ime_unlink_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO,
};
static struct ioengine_ops ioengine_aio = {
	.name		= "ime_aio",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_ime_setup,
	.init		= fio_ime_aio_init,
	.cleanup	= fio_ime_aio_clean,
	.queue		= fio_ime_aio_queue,
	.commit		= fio_ime_aio_commit,
	.getevents	= fio_ime_aio_getevents,
	.event		= fio_ime_event,
	.open_file	= fio_ime_open_file,
	.close_file	= fio_ime_close_file,
	.get_file_size	= fio_ime_get_file_size,
	.unlink_file	= fio_ime_unlink_file,
	.flags		= FIO_DISKLESSIO,
};
static void fio_init fio_ime_register(void)
{
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_pvrw);
	register_ioengine(&ioengine_aio);
}
static void fio_exit fio_ime_unregister(void)
{
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_pvrw);
	unregister_ioengine(&ioengine_aio);

	if (fio_ime_is_initialized && ime_native_finalize() < 0)
		log_err("Warning: IME did not finalize properly\n");
}