 * regular read/write sync io engine, using mmap to talk to the backing file
16 struct io_u *last_io_u;
19 static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
20 int max, struct timespec fio_unused *t)
25 * we can only have one finished io_u for sync io, since the depth
28 if (list_empty(&td->io_u_busylist))
34 static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
36 struct mmapio_data *sd = td->io_ops->data;
44 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
46 struct fio_file *f = io_u->file;
47 unsigned long long real_off = io_u->offset - f->file_offset;
48 struct mmapio_data *sd = td->io_ops->data;
50 if (io_u->ddir == DDIR_READ)
51 memcpy(io_u->buf, f->mmap + real_off, io_u->buflen);
52 else if (io_u->ddir == DDIR_WRITE)
53 memcpy(f->mmap + real_off, io_u->buf, io_u->buflen);
54 else if (io_u->ddir == DDIR_SYNC) {
55 if (msync(f->mmap, f->file_size, MS_SYNC))
60 * not really direct, but should drop the pages from the cache
62 if (td->odirect && io_u->ddir != DDIR_SYNC) {
63 if (msync(f->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
65 if (madvise(f->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
75 static void fio_mmapio_cleanup(struct thread_data *td)
77 if (td->io_ops->data) {
78 free(td->io_ops->data);
79 td->io_ops->data = NULL;
83 static int fio_mmapio_init(struct thread_data *td)
85 struct mmapio_data *sd = malloc(sizeof(*sd));
88 td->io_ops->data = sd;
92 static struct ioengine_ops ioengine = {
94 .version = FIO_IOOPS_VERSION,
95 .init = fio_mmapio_init,
96 .queue = fio_mmapio_queue,
97 .getevents = fio_mmapio_getevents,
98 .event = fio_mmapio_event,
99 .cleanup = fio_mmapio_cleanup,
100 .flags = FIO_SYNCIO | FIO_MMAPIO,
103 static void fio_init fio_mmapio_register(void)
105 register_ioengine(&ioengine);
108 static void fio_exit fio_mmapio_unregister(void)
110 unregister_ioengine(&ioengine);