/*
 * FIO engines for DDN's Infinite Memory Engine.
 * This file defines 3 engines: ime_psync, ime_psyncv, and ime_aio
 *
 * Copyright (C) 2018 DataDirect Networks. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License,
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Some details about the new engines are given below:
 *
 * ime_psync:
 * Most basic engine that issues calls to ime_native whenever an IO is queued.
 *
 * ime_psyncv:
 * This engine tries to queue the IOs (by creating iovecs) if asked by FIO (via
 * iodepth_batch). It refuses to queue when the iovecs can't be appended, and
 * waits for FIO to issue a commit. After a call to commit and get_events, new
 * IOs can be queued.
 *
 * ime_aio:
 * This engine tries to queue the IOs (by creating iovecs) if asked by FIO (via
 * iodepth_batch). When the iovecs can't be appended to the current request, a
 * new request for IME is created. These requests will be issued to IME when
 * commit is called. Contrary to ime_psyncv, there can be several requests at
 * once. We don't need to wait for a request to terminate before creating a new
 * one.
 */
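/*
 * For orientation, a minimal job sketch exercising the batched ime_psyncv
 * engine (untested; the option names are standard fio options, the path
 * and sizes are made up for illustration):
 *
 *   [ime-job]
 *   ioengine=ime_psyncv
 *   filename=/mnt/ime/testfile
 *   rw=write
 *   bs=4k
 *   size=1g
 *   iodepth=16
 *   iodepth_batch=8
 */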

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <linux/limits.h>
#include <ime_native.h>

#include "../fio.h"
/**************************************************************
 * Types and constants definitions
 **************************************************************/
/* define constants for async IOs */
#define FIO_IME_IN_PROGRESS	-1
#define FIO_IME_REQ_ERROR	-2
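
/* Lifecycle of an aio request's status: it starts at FIO_IME_IN_PROGRESS
   and is overwritten by the completion callback with either the number of
   bytes transferred or FIO_IME_REQ_ERROR (see fio_ime_aio_complete_cb). */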

/* This flag is used when some jobs were created using threads. In that
   case, IME can't be finalized in the engine-specific cleanup function,
   because other threads might still use IME. Instead, IME is finalized
   in the destructor (see fio_ime_unregister), only when the flag
   fio_ime_is_initialized is true (which means at least one thread has
   initialized IME). */
static bool fio_ime_is_initialized = false;
struct imesio_req {
	int fd;				/* File descriptor */
	enum fio_ddir ddir;		/* Type of IO (read or write) */
	off_t offset;			/* File offset */
};

struct imeaio_req {
	struct ime_aiocb iocb;		/* IME aio request */
	ssize_t status;			/* Status of the IME request */
	enum fio_ddir ddir;		/* Type of IO (read or write) */
	pthread_cond_t cond_endio;	/* Condition var to notify FIO */
	pthread_mutex_t status_mutex;	/* Mutex for cond_endio */
};
/* This structure will be used for 2 engines: ime_psyncv and ime_aio */
struct ime_data {
	struct imeaio_req *aioreqs;	/* array of aio requests */
	struct imesio_req *sioreq;	/* pointer to the only syncio request */

	struct iovec *iovecs;		/* array of queued iovecs */
	struct io_u **io_us;		/* array of queued io_u pointers */
	struct io_u **event_io_us;	/* array of the events retrieved after get_events */
	unsigned int queued;		/* iovecs/io_us in the queue */
	unsigned int events;		/* number of committed iovecs/io_us */

	/* variables used to implement a "ring" queue */
	unsigned int depth;		/* max entries in the queue */
	unsigned int head;		/* index used to append */
	unsigned int tail;		/* index used to pop */
	unsigned int cur_commit;	/* index of the first uncommitted req */

	/* offset used by the last iovec (used to check if the iovecs can be appended) */
	unsigned long long last_offset;

	/* The variables below are used for aio only */
	struct imeaio_req *last_req;	/* last request awaiting commit */
};

/**************************************************************
 * Private functions for queueing/unqueueing
 **************************************************************/
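
/* A worked example of the ring bookkeeping below, with depth=4: after
   three enqueues, head=3, tail=0, queued=3; committing a 2-iovec request
   then gives cur_commit=2 and events=2, leaving the entry at index 2
   queued but not yet committed. Popping one event advances tail to 1. */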
static void fio_ime_queue_incr(struct ime_data *ime_d)
{
	ime_d->head = (ime_d->head + 1) % ime_d->depth;
	ime_d->queued++;
}

static void fio_ime_queue_red(struct ime_data *ime_d)
{
	ime_d->tail = (ime_d->tail + 1) % ime_d->depth;
	ime_d->queued--;
	ime_d->events--;
}

static void fio_ime_queue_commit(struct ime_data *ime_d, int iovcnt)
{
	ime_d->cur_commit = (ime_d->cur_commit + iovcnt) % ime_d->depth;
	ime_d->events += iovcnt;
}

static void fio_ime_queue_reset(struct ime_data *ime_d)
{
	ime_d->head = 0;
	ime_d->tail = 0;
	ime_d->queued = 0;
	ime_d->events = 0;
	ime_d->cur_commit = 0;
}

/**************************************************************
 * General IME functions
 * (needed for both sync and async IOs)
 **************************************************************/
static char *fio_set_ime_filename(char *filename)
{
	static __thread char ime_filename[PATH_MAX];

	if (snprintf(ime_filename, PATH_MAX, "%s%s",
		     DEFAULT_IME_FILE_PREFIX, filename) < PATH_MAX)
		return ime_filename;

	return NULL;
}
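
/* For illustration: assuming DEFAULT_IME_FILE_PREFIX is "ime://" (the
   define itself is not shown here), filename=/dir/testfile is turned
   into "ime:///dir/testfile" and handled by the IME native client
   instead of POSIX. */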

static int fio_ime_get_file_size(struct thread_data *td, struct fio_file *f)
{
	struct stat buf;
	char *ime_filename;
	int ret;

	dprint(FD_FILE, "get file size %s\n", f->file_name);

	ime_filename = fio_set_ime_filename(f->file_name);
	if (ime_filename == NULL)
		return 1;

	ret = ime_native_stat(ime_filename, &buf);
	if (ret == -1) {
		td_verror(td, errno, "fstat");
		return 1;
	}

	f->real_file_size = buf.st_size;
	return 0;
}

/* This function mimics the generic_file_open function, but issues
   IME native calls instead of POSIX calls. */
static int fio_ime_open_file(struct thread_data *td, struct fio_file *f)
	dprint(FD_FILE, "fd open %s\n", f->file_name);

	td_verror(td, EINVAL, "IME does not support TRIM operation");

	td_verror(td, EINVAL, "IME does not support atomic IO");

	if (td->o.create_on_open && td->o.allow_create)

	if (td->o.allow_create)

	else if (td_read(td)) {

	/* We should never get here. */
	td_verror(td, EINVAL, "Unsupported open mode");

	ime_filename = fio_set_ime_filename(f->file_name);
	if (ime_filename == NULL)

	f->fd = ime_native_open(ime_filename, flags, 0600);

	char buf[FIO_VERROR_SIZE];

	snprintf(buf, sizeof(buf), "open(%s)", f->file_name);
	td_verror(td, __e, buf);

	/* Now we need to make sure the real file size is sufficient for FIO
	   to do its things. This is normally done before the file open function
	   is called, but because FIO would use POSIX calls, we need to do it
	   ourselves. */
	ret = fio_ime_get_file_size(td, f);

	ime_native_close(f->fd);
	td_verror(td, errno, "ime_get_file_size");

	desired_fs = f->io_size + f->file_offset;

	dprint(FD_FILE, "Laying out file %s%s\n",
	       DEFAULT_IME_FILE_PREFIX, f->file_name);
	if (!td->o.create_on_open &&
	    f->real_file_size < desired_fs &&
	    ime_native_ftruncate(f->fd, desired_fs) < 0) {
		ime_native_close(f->fd);
		td_verror(td, errno, "ime_native_ftruncate");

	if (f->real_file_size < desired_fs)
		f->real_file_size = desired_fs;

	else if (td_read(td) && f->real_file_size < desired_fs) {
		ime_native_close(f->fd);
		log_err("error: can't read %lu bytes from file with "
			"%lu bytes\n", desired_fs, f->real_file_size);

static int fio_ime_close_file(struct thread_data fio_unused *td, struct fio_file *f)
{
	int ret = 0;

	dprint(FD_FILE, "fd close %s\n", f->file_name);

	if (ime_native_close(f->fd) < 0)
		ret = errno;

	f->fd = -1;
	return ret;
}

static int fio_ime_unlink_file(struct thread_data *td, struct fio_file *f)
{
	char *ime_filename = fio_set_ime_filename(f->file_name);
	int ret;

	if (ime_filename == NULL)
		return 1;

	ret = unlink(ime_filename);
	return ret < 0 ? errno : 0;
}

static struct io_u *fio_ime_event(struct thread_data *td, int event)
{
	struct ime_data *ime_d = td->io_ops_data;

	return ime_d->event_io_us[event];
}

/* Setup function, used in place of get_file_sizes when setting up the
   files. Instead, we set real_file_size to 0 for each file. This way we
   can avoid calling ime_native_init before the forks are created. */
static int fio_ime_setup(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	for_each_file(td, f, i) {
		dprint(FD_FILE, "setup: set file size to 0 for %p/%d/%s\n",
		       f, i, f->file_name);
		f->real_file_size = 0;
	}

	return 0;
}

static int fio_ime_engine_init(struct thread_data *td)
{
	struct fio_file *f;
	unsigned int i;

	dprint(FD_IO, "ime engine init\n");
	if (fio_ime_is_initialized && !td->o.use_thread) {
		log_err("Warning: something might go wrong. Not all threads/forks were"
			" created before the FIO jobs were initialized.\n");
	}

	ime_native_init();
	fio_ime_is_initialized = true;

	/* We have to temporarily set real_file_size so that
	   FIO can initialize properly. It will be corrected
	   by the file open function. */
	for_each_file(td, f, i)
		f->real_file_size = f->io_size + f->file_offset;

	return 0;
}

static void fio_ime_engine_finalize(struct thread_data *td)
{
	/* Only finalize IME when using forks */
	if (!td->o.use_thread) {
		if (ime_native_finalize() < 0)
			log_err("error in ime_native_finalize\n");
		fio_ime_is_initialized = false;
	}
}

/**************************************************************
 * Private functions for blocking IOs
 * (without iovecs)
 **************************************************************/

/* Notice: this function comes from the sync engine. */
/* It is used by the commit function to return a proper code and fill
   some attributes in the io_u used for the IO. */
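/* Example: a 4096-byte write for which ime_native_pwrite returns 3072
   leaves io_u->resid = 1024; the io_u still completes, and FIO accounts
   for the short transfer via resid. */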
static int fio_ime_psync_end(struct thread_data *td, struct io_u *io_u, ssize_t ret)
{
	if (ret != (ssize_t) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}

static enum fio_q_status fio_ime_psync_queue(struct thread_data *td,
		struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	ssize_t ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = ime_native_pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = ime_native_pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_SYNC)
		ret = ime_native_fsync(f->fd);
	else {
		ret = io_u->xfer_buflen;
		io_u->error = EINVAL;
	}

	return fio_ime_psync_end(td, io_u, ret);
}

/**************************************************************
 * Private functions for blocking IOs
 * (with iovecs)
 **************************************************************/
static bool fio_ime_psyncv_can_queue(struct ime_data *ime_d, struct io_u *io_u)
{
	/* We can only queue if:
	   - There are no queued iovecs
	   - Or if there is at least one:
	     - There must be no event waiting for retrieval
	     - The offsets must be contiguous
	     - The ddir and fd must be the same */
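	/* For example, two 4 KiB writes at offsets 0 and 4096 on the same fd
	   satisfy these conditions and are appended to one iovec array; a
	   different fd or ddir, a gap in offsets, or a pending event forces
	   a commit + get_events cycle first. */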
	return (ime_d->queued == 0 || (
		ime_d->events == 0 &&
		ime_d->last_offset == io_u->offset &&
		ime_d->sioreq->ddir == io_u->ddir &&
		ime_d->sioreq->fd == io_u->file->fd));
}

/* Before using this function, we should have already
   ensured that the queue is not full */
static void fio_ime_psyncv_enqueue(struct ime_data *ime_d, struct io_u *io_u)
{
	struct imesio_req *ioreq = ime_d->sioreq;
	struct iovec *iov = &ime_d->iovecs[ime_d->head];

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (ime_d->queued == 0) {
		ioreq->offset = io_u->offset;
		ioreq->ddir = io_u->ddir;
		ioreq->fd = io_u->file->fd;
	}

	ime_d->io_us[ime_d->head] = io_u;
	ime_d->last_offset = io_u->offset + io_u->xfer_buflen;
	fio_ime_queue_incr(ime_d);
}

/* Tries to queue an IO. It will fail if the IO can't be appended to the
   current request or if the current request has been committed but not
   yet retrieved by get_events. */
static enum fio_q_status fio_ime_psyncv_queue(struct thread_data *td,
		struct io_u *io_u)
{
	struct ime_data *ime_d = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (ime_d->queued == ime_d->depth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (!fio_ime_psyncv_can_queue(ime_d, io_u))
			return FIO_Q_BUSY;

		dprint(FD_IO, "queue: ddir=%d at %u commit=%u queued=%u events=%u\n",
			io_u->ddir, ime_d->head, ime_d->cur_commit,
			ime_d->queued, ime_d->events);
		fio_ime_psyncv_enqueue(ime_d, io_u);
		return FIO_Q_QUEUED;
	} else if (io_u->ddir == DDIR_SYNC) {
		if (ime_native_fsync(io_u->file->fd) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "fsync");
		}
		return FIO_Q_COMPLETED;
	} else {
		io_u->error = EINVAL;
		td_verror(td, io_u->error, "wrong ddir");
		return FIO_Q_COMPLETED;
	}
}

/* Notice: this function comes from the sync engine. */
/* It is used by the commit function to return a proper code and fill
   some attributes in the io_us appended to the current request. */
static int fio_ime_psyncv_end(struct thread_data *td, ssize_t bytes)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct io_u *io_u;
	int err = 0;
	unsigned int i;

	if (bytes < 0)
		err = errno;

	for (i = 0; i < ime_d->queued; i++) {
		io_u = ime_d->io_us[i];
		if (err) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (err) {
		td_verror(td, err, "xfer psyncv");
		return -err;
	}

	return 0;
}

/* Commits the current request by calling ime_native (with one or several
   iovecs). After this commit, the corresponding events (one per iovec)
   can be retrieved by get_events. */
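/* Example: four contiguous 4 KiB writes queued above become a single
   ime_native_pwritev call with four iovecs; get_events will then report
   four events, one per iovec. */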
static int fio_ime_psyncv_commit(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imesio_req *ioreq;
	int ret = 0;

	/* Exit if there are no (new) events to commit
	   or if the previously committed events haven't been retrieved */
	if (!ime_d->queued || ime_d->events)
		return 0;

	ioreq = ime_d->sioreq;
	ime_d->events = ime_d->queued;
	if (ioreq->ddir == DDIR_READ)
		ret = ime_native_preadv(ioreq->fd, ime_d->iovecs, ime_d->queued, ioreq->offset);
	else
		ret = ime_native_pwritev(ioreq->fd, ime_d->iovecs, ime_d->queued, ioreq->offset);

	dprint(FD_IO, "committed %d iovecs\n", ime_d->queued);

	return fio_ime_psyncv_end(td, ret);
}

static int fio_ime_psyncv_getevents(struct thread_data *td, unsigned int min,
		unsigned int max, const struct timespec *t)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct io_u *io_u;
	int events = 0;
	unsigned int count;

	for (count = 0; count < ime_d->events; count++) {
		io_u = ime_d->io_us[count];
		ime_d->event_io_us[events] = io_u;
		events++;
	}
	fio_ime_queue_reset(ime_d);

	dprint(FD_IO, "getevents(%u,%u) ret=%d queued=%u events=%u\n",
		min, max, events, ime_d->queued, ime_d->events);
	return events;
}

static int fio_ime_psyncv_init(struct thread_data *td)
{
	struct ime_data *ime_d;

	if (fio_ime_engine_init(td) < 0)
		return 1;

	ime_d = calloc(1, sizeof(*ime_d));

	ime_d->sioreq = malloc(sizeof(struct imesio_req));
	ime_d->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	ime_d->io_us = malloc(2 * td->o.iodepth * sizeof(struct io_u *));
	ime_d->event_io_us = ime_d->io_us + td->o.iodepth;

	ime_d->depth = td->o.iodepth;

	td->io_ops_data = ime_d;
	return 0;
}

static void fio_ime_psyncv_clean(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;

	if (ime_d) {
		free(ime_d->sioreq);
		free(ime_d->iovecs);
		free(ime_d->io_us);
		free(ime_d);
		td->io_ops_data = NULL;
	}

	fio_ime_engine_finalize(td);
}

/**************************************************************
 * Private functions for non-blocking IOs
 **************************************************************/

void fio_ime_aio_complete_cb(struct ime_aiocb *aiocb, int err,
		ssize_t bytes)
{
	struct imeaio_req *ioreq = (struct imeaio_req *) aiocb->user_context;

	pthread_mutex_lock(&ioreq->status_mutex);
	ioreq->status = err == 0 ? bytes : FIO_IME_REQ_ERROR;
	pthread_mutex_unlock(&ioreq->status_mutex);

	pthread_cond_signal(&ioreq->cond_endio);
}

static bool fio_ime_aio_can_queue(struct ime_data *ime_d, struct io_u *io_u)
{
	/* So far we can queue in any case. */
	return true;
}

static bool fio_ime_aio_can_append(struct ime_data *ime_d, struct io_u *io_u)
{
	/* We can only append if:
	   - The iovecs will be contiguous in the array
	   - There is already a queued iovec
	   - The offsets are contiguous
	   - The ddir and fd are the same */
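	/* Example: if the last queued (and not yet committed) IO ends at
	   offset X on fd F, a new IO of the same ddir starting at X on F
	   is appended to the same request (its iovcnt is simply bumped in
	   fio_ime_aio_enqueue); anything else starts a fresh request. */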
	return (ime_d->head != 0 &&
		ime_d->queued - ime_d->events > 0 &&
		ime_d->last_offset == io_u->offset &&
		ime_d->last_req->ddir == io_u->ddir &&
		ime_d->last_req->iocb.fd == io_u->file->fd);
}

/* Before using this function, we should have already
   ensured that the queue is not full */
static void fio_ime_aio_enqueue(struct ime_data *ime_d, struct io_u *io_u)
{
	struct imeaio_req *ioreq = &ime_d->aioreqs[ime_d->head];
	struct ime_aiocb *iocb = &ioreq->iocb;
	struct iovec *iov = &ime_d->iovecs[ime_d->head];

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (fio_ime_aio_can_append(ime_d, io_u))
		ime_d->last_req->iocb.iovcnt++;
	else {
		ioreq->status = FIO_IME_IN_PROGRESS;
		ioreq->ddir = io_u->ddir;
		ime_d->last_req = ioreq;

		iocb->complete_cb = &fio_ime_aio_complete_cb;
		iocb->fd = io_u->file->fd;
		iocb->file_offset = io_u->offset;
		/* start the new request on this iovec; iov/iovcnt are
		   assumed to be the ime_aiocb fields from ime_native.h */
		iocb->iov = iov;
		iocb->iovcnt = 1;
		iocb->user_context = (intptr_t) ioreq;
	}

	ime_d->io_us[ime_d->head] = io_u;
	ime_d->last_offset = io_u->offset + io_u->xfer_buflen;
	fio_ime_queue_incr(ime_d);
}

/* Tries to queue an IO. It will create a new request if the IO can't be
   appended to the current request. It will fail if the queue can't contain
   any more io_u/iovec. In this case, commit and then get_events need to be
   called before new IOs can be queued. */
static enum fio_q_status fio_ime_aio_queue(struct thread_data *td,
		struct io_u *io_u)
{
	struct ime_data *ime_d = td->io_ops_data;

	fio_ro_check(td, io_u);

	dprint(FD_IO, "queue: ddir=%d at %u commit=%u queued=%u events=%u\n",
		io_u->ddir, ime_d->head, ime_d->cur_commit,
		ime_d->queued, ime_d->events);

	if (ime_d->queued == ime_d->depth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (!fio_ime_aio_can_queue(ime_d, io_u))
			return FIO_Q_BUSY;

		fio_ime_aio_enqueue(ime_d, io_u);
		return FIO_Q_QUEUED;
	} else if (io_u->ddir == DDIR_SYNC) {
		if (ime_native_fsync(io_u->file->fd) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "fsync");
		}
		return FIO_Q_COMPLETED;
	} else {
		io_u->error = EINVAL;
		td_verror(td, io_u->error, "wrong ddir");
		return FIO_Q_COMPLETED;
	}
}

static int fio_ime_aio_commit(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imeaio_req *ioreq;
	int ret = 0;

	/* Loop while there are events to commit */
	while (ime_d->queued - ime_d->events) {
		ioreq = &ime_d->aioreqs[ime_d->cur_commit];
		if (ioreq->ddir == DDIR_READ)
			ret = ime_native_aio_read(&ioreq->iocb);
		else
			ret = ime_native_aio_write(&ioreq->iocb);

		fio_ime_queue_commit(ime_d, ioreq->iocb.iovcnt);

		/* fio needs a negative error code */
		if (ret < 0) {
			ioreq->status = FIO_IME_REQ_ERROR;
			return ret;
		}

		io_u_mark_submit(td, ioreq->iocb.iovcnt);
		dprint(FD_IO, "committed %d iovecs commit=%u queued=%u events=%u\n",
			ioreq->iocb.iovcnt, ime_d->cur_commit,
			ime_d->queued, ime_d->events);
	}

	return 0;
}

static int fio_ime_aio_getevents(struct thread_data *td, unsigned int min,
		unsigned int max, const struct timespec *t)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imeaio_req *ioreq;
	struct io_u *io_u;
	int events = 0;
	unsigned int count;
	ssize_t bytes;

	while (ime_d->events) {
		ioreq = &ime_d->aioreqs[ime_d->tail];

		/* Break if we already have events and appending the next
		   request's events would exceed max */
		if (events && events + ioreq->iocb.iovcnt > max)
			break;

		if (ioreq->status != FIO_IME_IN_PROGRESS) {
			bytes = ioreq->status;
			for (count = 0; count < ioreq->iocb.iovcnt; count++) {
				io_u = ime_d->io_us[ime_d->tail];
				ime_d->event_io_us[events] = io_u;
				events++;
				fio_ime_queue_red(ime_d);

				if (ioreq->status == FIO_IME_REQ_ERROR)
					io_u->error = EIO;
				else {
					io_u->resid = bytes > io_u->xfer_buflen ?
							0 : io_u->xfer_buflen - bytes;
					io_u->error = 0;
					bytes -= io_u->xfer_buflen - io_u->resid;
				}
			}
		} else {
			pthread_mutex_lock(&ioreq->status_mutex);
			while (ioreq->status == FIO_IME_IN_PROGRESS) {
				pthread_cond_wait(&ioreq->cond_endio, &ioreq->status_mutex);
			}
			pthread_mutex_unlock(&ioreq->status_mutex);
		}
	}

	dprint(FD_IO, "getevents(%u,%u) ret=%d queued=%u events=%u\n", min, max,
		events, ime_d->queued, ime_d->events);
	return events;
}

static int fio_ime_aio_init(struct thread_data *td)
{
	struct ime_data *ime_d;
	struct imeaio_req *ioreq;
	unsigned int i;

	if (fio_ime_engine_init(td) < 0)
		return 1;

	ime_d = calloc(1, sizeof(*ime_d));

	ime_d->aioreqs = malloc(td->o.iodepth * sizeof(struct imeaio_req));
	ime_d->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	ime_d->io_us = malloc(2 * td->o.iodepth * sizeof(struct io_u *));
	ime_d->event_io_us = ime_d->io_us + td->o.iodepth;

	ime_d->depth = td->o.iodepth;
	for (i = 0; i < ime_d->depth; i++) {
		ioreq = &ime_d->aioreqs[i];
		pthread_cond_init(&ioreq->cond_endio, NULL);
		pthread_mutex_init(&ioreq->status_mutex, NULL);
	}

	td->io_ops_data = ime_d;
	return 0;
}

static void fio_ime_aio_clean(struct thread_data *td)
{
	struct ime_data *ime_d = td->io_ops_data;
	struct imeaio_req *ioreq;
	unsigned int i;

	if (ime_d) {
		for (i = 0; i < ime_d->depth; i++) {
			ioreq = &ime_d->aioreqs[i];
			pthread_cond_destroy(&ioreq->cond_endio);
			pthread_mutex_destroy(&ioreq->status_mutex);
		}
		free(ime_d->aioreqs);
		free(ime_d->iovecs);
		free(ime_d->io_us);
		free(ime_d);
		td->io_ops_data = NULL;
	}

	fio_ime_engine_finalize(td);
}

/**************************************************************
 * IO engines definitions
 **************************************************************/

/* The FIO_DISKLESSIO flag used for these engines is necessary to prevent
   FIO from using POSIX calls. See fio_ime_open_file for more details. */

static struct ioengine_ops ioengine_prw = {
	.name		= "ime_psync",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_ime_setup,
	.init		= fio_ime_engine_init,
	.cleanup	= fio_ime_engine_finalize,
	.queue		= fio_ime_psync_queue,
	.open_file	= fio_ime_open_file,
	.close_file	= fio_ime_close_file,
	.get_file_size	= fio_ime_get_file_size,
	.unlink_file	= fio_ime_unlink_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO,
};

static struct ioengine_ops ioengine_pvrw = {
	.name		= "ime_psyncv",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_ime_setup,
	.init		= fio_ime_psyncv_init,
	.cleanup	= fio_ime_psyncv_clean,
	.queue		= fio_ime_psyncv_queue,
	.commit		= fio_ime_psyncv_commit,
	.getevents	= fio_ime_psyncv_getevents,
	.event		= fio_ime_event,
	.open_file	= fio_ime_open_file,
	.close_file	= fio_ime_close_file,
	.get_file_size	= fio_ime_get_file_size,
	.unlink_file	= fio_ime_unlink_file,
	.flags		= FIO_SYNCIO | FIO_DISKLESSIO,
};

static struct ioengine_ops ioengine_aio = {
	.name		= "ime_aio",
	.version	= FIO_IOOPS_VERSION,
	.setup		= fio_ime_setup,
	.init		= fio_ime_aio_init,
	.cleanup	= fio_ime_aio_clean,
	.queue		= fio_ime_aio_queue,
	.commit		= fio_ime_aio_commit,
	.getevents	= fio_ime_aio_getevents,
	.event		= fio_ime_event,
	.open_file	= fio_ime_open_file,
	.close_file	= fio_ime_close_file,
	.get_file_size	= fio_ime_get_file_size,
	.unlink_file	= fio_ime_unlink_file,
	.flags		= FIO_DISKLESSIO,
};

static void fio_init fio_ime_register(void)
{
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_pvrw);
	register_ioengine(&ioengine_aio);
}

static void fio_exit fio_ime_unregister(void)
{
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_pvrw);
	unregister_ioengine(&ioengine_aio);

	if (fio_ime_is_initialized && ime_native_finalize() < 0) {
		log_err("Warning: IME did not finalize properly\n");
	}
}