2 * regular read/write sync io engine
16 struct io_u *last_io_u;
/*
 * Reap completed events for the sync mmap engine.
 * NOTE(review): interior lines of this function are missing from this
 * chunk (no return statements visible); only comments added here.
 */
19 static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
20 int max, struct timespec fio_unused *t)
25 * we can only have one finished io_u for sync io, since the depth
/*
 * Synchronous engine: at most one io_u can be outstanding, so an empty
 * busy list means there is nothing to reap -- presumably the missing
 * branch returns 0 here; TODO confirm against the full file.
 */
28 if (list_empty(&td->io_u_busylist))
/*
 * Return the io_u for a reaped event slot.
 * Fetches the engine-private data; presumably returns sd->last_io_u for
 * event 0 -- body is truncated in this chunk, TODO confirm.
 */
34 static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
36 struct mmapio_data *sd = td->io_ops->data;
/*
 * Queue (and synchronously complete) one io_u against the mmap'ed file.
 * Reads/writes are plain memcpy against the mapping; DDIR_SYNC is an
 * msync of the whole mapped file.
 * NOTE(review): error branches and the return statement are missing from
 * this chunk; only comments added here.
 */
44 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
46 struct fio_file *f = io_u->file;
/* Offset relative to the start of this file's mapping. */
47 unsigned long long real_off = io_u->offset - f->file_offset;
48 struct mmapio_data *sd = td->io_ops->data;
/* Read: copy from the mapping into the transfer buffer. */
50 if (io_u->ddir == DDIR_READ)
51 memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
/* Write: copy from the transfer buffer into the mapping. */
52 else if (io_u->ddir == DDIR_WRITE)
53 memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
/* Sync: flush the entire mapping; error path truncated in this chunk. */
54 else if (io_u->ddir == DDIR_SYNC) {
55 if (msync(f->mmap, f->file_size, MS_SYNC))
60 * not really direct, but should drop the pages from the cache
/*
 * O_DIRECT emulation: flush the just-touched range, then madvise it
 * away so the page cache does not keep it.  Skipped for DDIR_SYNC,
 * which already msync'ed the whole file above.
 */
62 if (td->odirect && io_u->ddir != DDIR_SYNC) {
63 if (msync(f->mmap + real_off, io_u->xfer_buflen, MS_SYNC) < 0)
65 if (madvise(f->mmap + real_off, io_u->xfer_buflen, MADV_DONTNEED) < 0)
/* Report any accumulated error against the thread. */
72 td_verror(td, io_u->error);
/*
 * Tear down the engine-private data allocated by fio_mmapio_init.
 * Frees the mmapio_data blob and clears the pointer so a double cleanup
 * is harmless.
 */
77 static void fio_mmapio_cleanup(struct thread_data *td)
79 if (td->io_ops->data) {
80 free(td->io_ops->data);
81 td->io_ops->data = NULL;
/*
 * Allocate the engine-private mmapio_data and hang it off the thread's
 * io_ops.  NOTE(review): the malloc-failure check and return statement
 * are missing from this chunk; TODO confirm against the full file.
 */
85 static int fio_mmapio_init(struct thread_data *td)
87 struct mmapio_data *sd = malloc(sizeof(*sd));
90 td->io_ops->data = sd;
/*
 * Ops table wiring this engine into fio.  FIO_SYNCIO marks it as a
 * synchronous (depth-1) engine; FIO_MMAPIO tells fio the files must be
 * mmap'ed.  NOTE(review): the .name initializer and closing brace are
 * not visible in this chunk.
 */
94 static struct ioengine_ops ioengine = {
96 .version = FIO_IOOPS_VERSION,
97 .init = fio_mmapio_init,
98 .queue = fio_mmapio_queue,
99 .getevents = fio_mmapio_getevents,
100 .event = fio_mmapio_event,
101 .cleanup = fio_mmapio_cleanup,
102 .flags = FIO_SYNCIO | FIO_MMAPIO,
/*
 * Register the engine at load time -- fio_init is presumably a
 * constructor-attribute macro; TODO confirm against fio headers.
 */
105 static void fio_init fio_mmapio_register(void)
107 register_ioengine(&ioengine);
/*
 * Unregister the engine at unload time -- fio_exit is presumably a
 * destructor-attribute macro; TODO confirm against fio headers.
 */
110 static void fio_exit fio_mmapio_unregister(void)
112 unregister_ioengine(&ioengine);