/*
 * mmap engine
 *
 * IO engine that reads/writes from files by doing memcpy to/from
 * a memory mapped region of the file.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

#include "../fio.h"
#include "../verify.h"

/*
 * Limits us to 1GiB of mapped files in total
 */
#define MMAP_TOTAL_SZ	(1 * 1024 * 1024 * 1024UL)

static unsigned long mmap_map_size;

struct fio_mmap_data {
	void *mmap_ptr;
	size_t mmap_sz;
	off_t mmap_off;
};
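
/*
 * mmap_off/mmap_sz describe the file range currently mapped at
 * mmap_ptr; fio_mmapio_prep() below reuses this window when an io_u
 * falls entirely inside it, and remaps otherwise.
 */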

static bool fio_madvise_file(struct thread_data *td, struct fio_file *f,
			     size_t length)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	if (!td->o.fadvise_hint)
		return true;

	if (!td_random(td)) {
		if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_SEQUENTIAL) < 0) {
			td_verror(td, errno, "madvise");
			return false;
		}
	} else {
		if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_RANDOM) < 0) {
			td_verror(td, errno, "madvise");
			return false;
		}
	}

	return true;
}
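
/*
 * POSIX_MADV_SEQUENTIAL/POSIX_MADV_RANDOM let the kernel tune its
 * readahead to the job's access pattern; a failed hint is treated as
 * an error rather than being silently ignored.
 */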

static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
			 size_t length, off_t off)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
	int flags = 0;

	if (td_rw(td) && !td->o.verify_only)
		flags = PROT_READ | PROT_WRITE;
	else if (td_write(td) && !td->o.verify_only) {
		flags = PROT_WRITE;

		if (td->o.verify != VERIFY_NONE)
			flags |= PROT_READ;
	} else
		flags = PROT_READ;

	fmd->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
	if (fmd->mmap_ptr == MAP_FAILED) {
		fmd->mmap_ptr = NULL;
		td_verror(td, errno, "mmap");
		goto err;
	}

	if (!fio_madvise_file(td, f, length))
		goto err;

	if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_DONTNEED) < 0) {
		td_verror(td, errno, "madvise");
		goto err;
	}

#ifdef FIO_MADV_FREE
	if (f->filetype == FIO_TYPE_BLOCK)
		(void) posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, FIO_MADV_FREE);
#endif

err:
	if (td->error && fmd->mmap_ptr)
		munmap(fmd->mmap_ptr, length);

	return td->error;
}
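
/*
 * The protection flags mirror the job's data direction: mixed
 * read/write maps PROT_READ|PROT_WRITE, while a pure write job adds
 * PROT_READ only if verification will read the data back. On any
 * setup failure, the err path unmaps whatever had been established.
 */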

/*
 * Just mmap an appropriate portion, we cannot mmap the full extent
 */
static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	if (io_u->buflen > mmap_map_size) {
		log_err("fio: bs too big for mmap engine\n");
		return EIO;
	}

	fmd->mmap_sz = mmap_map_size;
	if (fmd->mmap_sz > f->io_size)
		fmd->mmap_sz = f->io_size;

	fmd->mmap_off = io_u->offset;

	return fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
}
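
/*
 * Worked example: with MMAP_TOTAL_SZ at 1GiB and nr_files=4, init
 * sets mmap_map_size to 256MiB, so each file gets at most a 256MiB
 * window, capped further to f->io_size for smaller files.
 */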

/*
 * Attempt to mmap the entire file
 */
static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
	int ret;

	if (fio_file_partial_mmap(f))
		return EINVAL;
	if (io_u->offset != (size_t) io_u->offset ||
	    f->io_size != (size_t) f->io_size) {
		fio_file_set_partial_mmap(f);
		return EINVAL;
	}

	fmd->mmap_sz = f->io_size;
	fmd->mmap_off = 0;

	ret = fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
	if (ret)
		fio_file_set_partial_mmap(f);

	return ret;
}
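
/*
 * The (size_t) round-trip casts guard 32-bit builds: if the offset or
 * file size does not fit in size_t, the full extent cannot live in
 * this address space, so the file is flagged for partial mmap and the
 * limited path is used instead.
 */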

static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
	int ret;

	/*
	 * It fits within existing mapping, use it
	 */
	if (io_u->offset >= fmd->mmap_off &&
	    io_u->offset + io_u->buflen <= fmd->mmap_off + fmd->mmap_sz)
		goto done;

	/*
	 * unmap any existing mapping
	 */
	if (fmd->mmap_ptr) {
		if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0)
			return errno;
		fmd->mmap_ptr = NULL;
	}

	if (fio_mmapio_prep_full(td, io_u)) {
		td_clear_error(td);
		ret = fio_mmapio_prep_limited(td, io_u);
		if (ret)
			return ret;
	}

done:
	io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off -
				f->file_offset;
	return 0;
}
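
/*
 * io_u->mmap_data is the address the queue path copies to/from: the
 * map base plus the io_u offset's distance into the mapped window,
 * with f->file_offset subtracted out.
 */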

static enum fio_q_status fio_mmapio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
	else if (ddir_sync(io_u->ddir)) {
		if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		int ret = do_io_u_trim(td, io_u);

		if (!ret)
			td_verror(td, io_u->error, "trim");
	}

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->o.odirect && ddir_rw(io_u->ddir)) {
		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
		if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "madvise");
		}
	}

	return FIO_Q_COMPLETED;
}
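
/*
 * The engine is synchronous: the memcpy (or msync/trim) finishes
 * before queue returns, so every io_u is completed inline as
 * FIO_Q_COMPLETED and nothing is ever left in flight.
 */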

static int fio_mmapio_init(struct thread_data *td)
{
	struct thread_options *o = &td->o;

	if ((o->rw_min_bs & page_mask) &&
	    (o->odirect || o->fsync_blocks || o->fdatasync_blocks)) {
		log_err("fio: mmap options dictate a minimum block size of "
			"%llu bytes\n", (unsigned long long) page_size);
		return 1;
	}

	mmap_map_size = MMAP_TOTAL_SZ / o->nr_files;
	return 0;
}
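
/*
 * Example: with 4096-byte pages, combining odirect or the fsync
 * options with a block size that is not a multiple of 4096 fails
 * init, since the msync()/madvise() calls in the queue path operate
 * on page granularity.
 */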

static int fio_mmapio_open_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mmap_data *fmd;
	int ret;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	fmd = calloc(1, sizeof(*fmd));
	if (!fmd) {
		int fio_unused __ret;

		__ret = generic_close_file(td, f);
		return 1;
	}

	FILE_SET_ENG_DATA(f, fmd);
	return 0;
}
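
/*
 * calloc() zeroes the engine data, so mmap_ptr starts out NULL and
 * prep() sees that no window exists yet; if the allocation fails, the
 * file is closed again and the open reports failure.
 */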

static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	FILE_SET_ENG_DATA(f, NULL);
	free(fmd);
	fio_file_clear_partial_mmap(f);

	return generic_close_file(td, f);
}

static struct ioengine_ops ioengine = {
	.name		= "mmap",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_mmapio_init,
	.prep		= fio_mmapio_prep,
	.queue		= fio_mmapio_queue,
	.open_file	= fio_mmapio_open_file,
	.close_file	= fio_mmapio_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};
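
/*
 * FIO_SYNCIO marks the engine synchronous, so fio never reaps events
 * for it; FIO_NOEXTEND tells fio the engine cannot extend a file,
 * since storing through a shared mapping past EOF faults rather than
 * growing the file.
 */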

static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}
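
/*
 * Illustrative usage (job parameters are examples, not taken from
 * this file):
 *
 *   fio --name=mmaptest --ioengine=mmap --rw=randread --bs=4k \
 *       --size=256m --filename=/tmp/mmaptest
 */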