2 * sync io engine that does reads/writes via memcpy against an mmap'ed file region
15 struct io_u *last_io_u;
/*
 * Reap completed events. min/max are unused: as a sync engine at most
 * one io_u can be outstanding, so there is at most one event to report.
 * NOTE(review): body only partially visible here -- presumably returns
 * 0 or 1 depending on the busylist check below; confirm in full source.
 */
18 static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
19 int max, struct timespec fio_unused *t)
24 * we can only have one finished io_u for sync io, since the depth
27 if (list_empty(&td->io_u_busylist))
/*
 * Return the io_u for completed event number 'event'.
 * NOTE(review): return statement not visible in this excerpt --
 * presumably hands back sd->last_io_u (the single in-flight request
 * of a sync engine); confirm against the full source.
 */
33 static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
35 struct mmapio_data *sd = td->io_ops->data;
/*
 * Queue one io_u against the mmap'ed file. Because the engine is
 * synchronous, the request completes inline: reads/writes are plain
 * memcpy()s against the mapping, DDIR_SYNC is an msync().
 */
43 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
45 struct fio_file *f = io_u->file;
/* io_u->offset is absolute in the file; rebase to the mapping start */
46 unsigned long long real_off = io_u->offset - f->file_offset;
/* NOTE(review): sd is not used in the visible lines -- presumably the
 * hidden tail stores io_u into sd->last_io_u for event(); confirm. */
47 struct mmapio_data *sd = td->io_ops->data;
49 if (io_u->ddir == DDIR_READ)
50 memcpy(io_u->buf, f->mmap + real_off, io_u->buflen);
51 else if (io_u->ddir == DDIR_WRITE)
52 memcpy(f->mmap + real_off, io_u->buf, io_u->buflen);
53 else if (io_u->ddir == DDIR_SYNC) {
/* NOTE(review): flushes the ENTIRE mapping, not just this io_u's
 * range -- potentially expensive on large files; confirm intended. */
54 if (msync(f->mmap, f->file_size, MS_SYNC))
59 * not really direct, but should drop the pages from the cache
/* O_DIRECT emulation: write the touched range back, then ask the
 * kernel to drop those pages. Skipped for DDIR_SYNC, which already
 * msync'ed above. */
61 if (td->odirect && io_u->ddir != DDIR_SYNC) {
62 if (msync(f->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
64 if (madvise(f->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
/*
 * Free the per-thread engine data allocated in fio_mmapio_init().
 * NOTE(review): the NULL guard is redundant -- free(NULL) is a no-op --
 * but harmless; clearing the pointer afterwards protects against a
 * double free if cleanup runs twice.
 */
74 static void fio_mmapio_cleanup(struct thread_data *td)
76 if (td->io_ops->data) {
77 free(td->io_ops->data);
78 td->io_ops->data = NULL;
/*
 * Allocate the per-thread engine data and hang it off td->io_ops.
 * NOTE(review): the malloc() result is stored without a NULL check in
 * the visible lines -- confirm whether the hidden lines (85-86) handle
 * allocation failure, or whether fio's policy is to abort on OOM.
 */
82 static int fio_mmapio_init(struct thread_data *td)
84 struct mmapio_data *sd = malloc(sizeof(*sd));
87 td->io_ops->data = sd;
/*
 * Engine registration table: wires the mmap engine's callbacks into
 * fio's ioengine interface. NOTE(review): the .name initializer is not
 * visible in this excerpt.
 */
91 struct ioengine_ops ioengine = {
93 .version = FIO_IOOPS_VERSION,
94 .init = fio_mmapio_init,
95 .queue = fio_mmapio_queue,
96 .getevents = fio_mmapio_getevents,
97 .event = fio_mmapio_event,
98 .cleanup = fio_mmapio_cleanup,
/* FIO_SYNCIO: queue() completes inline. FIO_MMAPIO: presumably tells
 * fio core to mmap the files so f->mmap is valid -- confirm. */
99 .flags = FIO_SYNCIO | FIO_MMAPIO,