/*
 * mmap engine
 *
 * IO engine that reads/writes from files by doing memcpy to/from
 * a memory mapped region of the file.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>

#include "../fio.h"
/*
 * Limits us to 2GB of mapped files in total
 */
#define MMAP_TOTAL_SZ	(2 * 1024 * 1024 * 1024UL)
static unsigned long mmap_map_size;
static unsigned long mmap_map_mask;
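
/*
 * mmap a window of the file. Protection flags follow the job's data
 * direction: read/write jobs map PROT_READ|PROT_WRITE, write-only jobs
 * map PROT_WRITE (adding PROT_READ if verification will read the data
 * back), and read-only jobs map PROT_READ. The mapping is then hinted
 * as sequential or random via madvise() to match the access pattern.
 */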
static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
			 size_t length, off_t off)
{
	int flags = 0;

	if (td_rw(td))
		flags = PROT_READ | PROT_WRITE;
	else if (td_write(td)) {
		flags = PROT_WRITE;

		if (td->o.verify != VERIFY_NONE)
			flags |= PROT_READ;
	} else
		flags = PROT_READ;

	f->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
	if (f->mmap_ptr == MAP_FAILED) {
		int err = errno;

		f->mmap_ptr = NULL;
		td_verror(td, err, "mmap");
		if (err == EINVAL && f->io_size > 2*1024*1024*1024UL)
			log_err("fio: mmap size likely too large\n");
		goto err;
	}

	if (file_invalidate_cache(td, f))
		goto err;

	if (!td_random(td)) {
		if (madvise(f->mmap_ptr, length, MADV_SEQUENTIAL) < 0) {
			td_verror(td, errno, "madvise");
			goto err;
		}
	} else {
		if (madvise(f->mmap_ptr, length, MADV_RANDOM) < 0) {
			td_verror(td, errno, "madvise");
			goto err;
		}
	}

err:
	return td->error;
}
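
/*
 * Just mmap an appropriate portion: the full extent cannot be mapped
 * within the MMAP_TOTAL_SZ budget, so keep a window of at most
 * mmap_map_size bytes mapped per file and remap whenever an io_u falls
 * outside the current window.
 */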
static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret = 0;

	if (io_u->buflen > mmap_map_size) {
		log_err("fio: bs too big for mmap engine\n");
		ret = EIO;
		goto err;
	}

	/*
	 * Fast path: the current window already covers this io_u
	 */
	if (io_u->offset >= f->mmap_off &&
	    io_u->offset + io_u->buflen < f->mmap_off + f->mmap_sz)
		goto done;

	if (f->mmap_ptr) {
		if (munmap(f->mmap_ptr, f->mmap_sz) < 0) {
			ret = errno;
			goto err;
		}
		f->mmap_ptr = NULL;
	}

	f->mmap_sz = mmap_map_size;
	if (f->mmap_sz > f->io_size)
		f->mmap_sz = f->io_size;

	f->mmap_off = io_u->offset & ~mmap_map_mask;
	if (io_u->offset + io_u->buflen >= f->mmap_off + f->mmap_sz)
		f->mmap_off -= io_u->buflen;

	ret = fio_mmap_file(td, f, f->mmap_sz, f->mmap_off);
done:
	if (!ret)
		io_u->mmap_data = f->mmap_ptr + io_u->offset - f->mmap_off -
					f->file_offset;
err:
	return ret;
}
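
/*
 * Queueing is synchronous: a read memcpy()s from the mapped window into
 * the io_u buffer, a write memcpy()s the other way, and a sync becomes
 * msync() on the whole window. When direct IO is requested, dirty pages
 * are msync()'ed and dropped via MADV_DONTNEED to approximate uncached
 * behaviour.
 */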
static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_SYNC) {
		if (msync(f->mmap_ptr, f->mmap_sz, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
	}

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
		if (madvise(io_u->mmap_data, io_u->xfer_buflen, MADV_DONTNEED) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "madvise");
		}
	}

	return FIO_Q_COMPLETED;
}
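
/*
 * Split the MMAP_TOTAL_SZ budget evenly across the job's files. The
 * loop finds the highest set bit of the per-file window size, so
 * mmap_map_mask holds the largest power of two not exceeding
 * mmap_map_size; fio_mmapio_prep() clears that bit when placing the
 * window for an io_u.
 */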
static int fio_mmapio_init(struct thread_data *td)
{
	unsigned long shift, mask;

	mmap_map_size = MMAP_TOTAL_SZ / td->o.nr_files;
	mask = mmap_map_size;
	shift = 0;
	do {
		mask >>= 1;
		if (!mask)
			break;
		shift++;
	} while (1);

	mmap_map_mask = 1UL << shift;
	return 0;
}
static struct ioengine_ops ioengine = {
	.name		= "mmap",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_mmapio_init,
	.prep		= fio_mmapio_prep,
	.queue		= fio_mmapio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};
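
/*
 * fio_init and fio_exit expand to GCC constructor/destructor
 * attributes, so the engine registers itself automatically when fio
 * is loaded.
 */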
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}