/*
 * regular read/write sync io engine
 */
/*
 * Per-thread private state for the mmap engine. Sync engines have a
 * queue depth of 1, so remembering the single last completed io_u is
 * enough for event retrieval.
 */
struct mmapio_data {
	struct io_u *last_io_u;	/* most recently completed io unit */
};
18 static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
19 int max, struct timespec fio_unused *t)
24 * we can only have one finished io_u for sync io, since the depth
27 if (list_empty(&td->io_u_busylist))
33 static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
35 struct mmapio_data *sd = td->io_ops->data;
43 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
45 unsigned long long real_off = io_u->offset - td->file_offset;
46 struct mmapio_data *sd = td->io_ops->data;
48 if (io_u->ddir == DDIR_READ)
49 memcpy(io_u->buf, td->mmap + real_off, io_u->buflen);
51 memcpy(td->mmap + real_off, io_u->buf, io_u->buflen);
54 * not really direct, but should drop the pages from the cache
57 if (msync(td->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
59 if (madvise(td->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
69 static int fio_mmapio_sync(struct thread_data *td)
71 return msync(td->mmap, td->file_size, MS_SYNC);
74 static void fio_mmapio_cleanup(struct thread_data *td)
76 if (td->io_ops->data) {
77 free(td->io_ops->data);
78 td->io_ops->data = NULL;
82 static int fio_mmapio_init(struct thread_data *td)
84 struct mmapio_data *sd = malloc(sizeof(*sd));
87 td->io_ops->data = sd;
91 struct ioengine_ops ioengine = {
93 .version = FIO_IOOPS_VERSION,
94 .init = fio_mmapio_init,
95 .queue = fio_mmapio_queue,
96 .getevents = fio_mmapio_getevents,
97 .event = fio_mmapio_event,
98 .cleanup = fio_mmapio_cleanup,
99 .sync = fio_mmapio_sync,