2 * mmap engine: sync io engine that reads/writes by memcpy to/from a memory-mapped file region
/*
 * Service one io_u synchronously against the file's memory mapping:
 * READ/WRITE become memcpy()s to/from f->mmap, SYNC becomes an msync()
 * of the whole mapping.  Always returns FIO_Q_COMPLETED (sync engine:
 * the request is finished by the time queue() returns).
 *
 * NOTE(review): this chunk is a non-contiguous excerpt; braces and
 * error-path lines between the visible statements are not shown, so the
 * comments below describe only what is visible.
 */
15 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
17 struct fio_file *f = io_u->file;
/* io_u->offset is file-absolute; the mapping begins at f->file_offset */
18 unsigned long long real_off = io_u->offset - f->file_offset;
20 if (io_u->ddir == DDIR_READ)
21 memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
22 else if (io_u->ddir == DDIR_WRITE)
23 memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
24 else if (io_u->ddir == DDIR_SYNC) {
/* a SYNC request flushes the entire mapped file, not just one range */
25 if (msync(f->mmap, f->file_size, MS_SYNC))
30 * not really direct, but should drop the pages from the cache
/*
 * O_DIRECT emulation for read/write: flush the touched range, then ask
 * the kernel to drop those pages (MADV_DONTNEED).  Skipped for SYNC,
 * which already flushed above.
 */
32 if (td->odirect && io_u->ddir != DDIR_SYNC) {
33 if (msync(f->mmap + real_off, io_u->xfer_buflen, MS_SYNC) < 0)
35 if (madvise(f->mmap + real_off, io_u->xfer_buflen, MADV_DONTNEED) < 0)
/* report whatever error was recorded on this io_u, tagged "sync" */
40 td_verror(td, io_u->error, "sync");
42 return FIO_Q_COMPLETED;
/*
 * Open the file via the generic helper, then establish a MAP_SHARED
 * mapping of the whole file and hand the kernel access-pattern hints
 * (MADV_SEQUENTIAL or MADV_RANDOM).  On failure the file is closed via
 * the engine's close_file hook.
 *
 * NOTE(review): non-contiguous excerpt — the 'flags' declaration, the
 * read-only PROT_* branch, returns, and several braces fall in unseen
 * lines; comments describe only the visible statements.
 */
45 static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
49 ret = generic_open_file(td, f);
/* read/write jobs (and write jobs that verify) need both protections */
54 flags = PROT_READ | PROT_WRITE;
55 else if (td_write(td)) {
/* verification reads back written data, so PROT_READ is also required */
58 if (td->verify != VERIFY_NONE)
/* map the whole file; MAP_SHARED so writes reach the underlying file */
63 f->mmap = mmap(NULL, f->file_size, flags, MAP_SHARED, f->fd, f->file_offset);
64 if (f->mmap == MAP_FAILED) {
66 td_verror(td, errno, "mmap");
/* drop any cached pages so the job starts from a cold cache */
70 if (file_invalidate_cache(td, f))
/* hint expected access pattern to the kernel readahead logic */
74 if (madvise(f->mmap, f->file_size, MADV_SEQUENTIAL) < 0) {
75 td_verror(td, errno, "madvise");
79 if (madvise(f->mmap, f->file_size, MADV_RANDOM) < 0) {
80 td_verror(td, errno, "madvise");
/* error path: undo the open through the engine's own close hook */
88 td->io_ops->close_file(td, f);
/*
 * Tear down the mapping created by fio_mmapio_open(), then close the
 * file descriptor via the generic helper.
 * NOTE(review): intervening lines (second parameter, braces, any
 * f->mmap guard) are not visible in this excerpt.
 */
92 static void fio_mmapio_close(struct thread_data fio_unused *td,
96 munmap(f->mmap, f->file_size);
99 generic_close_file(td, f);
/*
 * Engine ops table: synchronous engine (FIO_SYNCIO) that cannot extend
 * files (FIO_NOEXTEND — writes must land inside the existing mapping).
 * NOTE(review): the .name initializer (unseen line 103) is not visible
 * in this excerpt.
 */
102 static struct ioengine_ops ioengine = {
104 .version = FIO_IOOPS_VERSION,
105 .queue = fio_mmapio_queue,
106 .open_file = fio_mmapio_open,
107 .close_file = fio_mmapio_close,
108 .flags = FIO_SYNCIO | FIO_NOEXTEND,
/* fio_init constructor: register this engine with fio at program start */
111 static void fio_init fio_mmapio_register(void)
113 register_ioengine(&ioengine);
/* fio_exit destructor: remove the engine from fio's registry at exit */
116 static void fio_exit fio_mmapio_unregister(void)
118 unregister_ioengine(&ioengine);