 * IO engine that reads/writes from files by doing memcpy to/from
 * a memory mapped region of the file.
15 #include "../verify.h"
/*
 * Limits us to 1GB of mapped files in total
 */
#define MMAP_TOTAL_SZ	(1 * 1024 * 1024 * 1024UL)

static unsigned long mmap_map_size;
static unsigned long mmap_map_mask;

/*
 * Per-file engine state: the current mapping, if any.
 * Fields reconstructed from usage below — TODO confirm against upstream.
 */
struct fio_mmap_data {
	void *mmap_ptr;		/* base of current mapping, NULL when unmapped */
	size_t mmap_sz;		/* length of current mapping */
	off_t mmap_off;		/* file offset the mapping starts at */
};
31 static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
32 size_t length, off_t off)
34 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
38 flags = PROT_READ | PROT_WRITE;
39 else if (td_write(td)) {
42 if (td->o.verify != VERIFY_NONE)
47 fmd->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
48 if (fmd->mmap_ptr == MAP_FAILED) {
50 td_verror(td, errno, "mmap");
55 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_SEQUENTIAL) < 0) {
56 td_verror(td, errno, "madvise");
60 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_RANDOM) < 0) {
61 td_verror(td, errno, "madvise");
67 if (td->error && fmd->mmap_ptr)
68 munmap(fmd->mmap_ptr, length);
74 * Just mmap an appropriate portion, we cannot mmap the full extent
76 static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
78 struct fio_file *f = io_u->file;
79 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
81 if (io_u->buflen > mmap_map_size) {
82 log_err("fio: bs too big for mmap engine\n");
86 fmd->mmap_sz = mmap_map_size;
87 if (fmd->mmap_sz > f->io_size)
88 fmd->mmap_sz = f->io_size;
90 fmd->mmap_off = io_u->offset;
92 return fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
96 * Attempt to mmap the entire file
98 static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
100 struct fio_file *f = io_u->file;
101 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
104 if (fio_file_partial_mmap(f))
106 if (io_u->offset != (size_t) io_u->offset ||
107 f->io_size != (size_t) f->io_size) {
108 fio_file_set_partial_mmap(f);
112 fmd->mmap_sz = f->io_size;
115 ret = fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
117 fio_file_set_partial_mmap(f);
122 static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
124 struct fio_file *f = io_u->file;
125 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
129 * It fits within existing mapping, use it
131 if (io_u->offset >= fmd->mmap_off &&
132 io_u->offset + io_u->buflen < fmd->mmap_off + fmd->mmap_sz)
136 * unmap any existing mapping
139 if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0)
141 fmd->mmap_ptr = NULL;
144 if (fio_mmapio_prep_full(td, io_u)) {
146 ret = fio_mmapio_prep_limited(td, io_u);
152 io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off -
157 static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
159 struct fio_file *f = io_u->file;
160 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
162 fio_ro_check(td, io_u);
164 if (io_u->ddir == DDIR_READ)
165 memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
166 else if (io_u->ddir == DDIR_WRITE)
167 memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
168 else if (ddir_sync(io_u->ddir)) {
169 if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
171 td_verror(td, io_u->error, "msync");
173 } else if (io_u->ddir == DDIR_TRIM) {
174 int ret = do_io_u_trim(td, io_u);
177 td_verror(td, io_u->error, "trim");
182 * not really direct, but should drop the pages from the cache
184 if (td->o.odirect && ddir_rw(io_u->ddir)) {
185 if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
187 td_verror(td, io_u->error, "msync");
189 if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
191 td_verror(td, io_u->error, "madvise");
195 return FIO_Q_COMPLETED;
198 static int fio_mmapio_init(struct thread_data *td)
200 struct thread_options *o = &td->o;
201 unsigned long shift, mask;
203 if ((td->o.rw_min_bs & page_mask) &&
204 (o->odirect || o->fsync_blocks || o->fdatasync_blocks)) {
205 log_err("fio: mmap options dictate a minimum block size of "
206 "%llu bytes\n", (unsigned long long) page_size);
210 mmap_map_size = MMAP_TOTAL_SZ / td->o.nr_files;
211 mask = mmap_map_size;
220 mmap_map_mask = 1UL << shift;
224 static int fio_mmapio_open_file(struct thread_data *td, struct fio_file *f)
226 struct fio_mmap_data *fmd;
229 ret = generic_open_file(td, f);
233 fmd = calloc(1, sizeof(*fmd));
236 ret = generic_close_file(td, f);
240 FILE_SET_ENG_DATA(f, fmd);
/*
 * Detach and free the per-file engine state, then close the file.
 * Without the free() the fio_mmap_data allocated in open_file leaks
 * on every close.
 */
static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	FILE_SET_ENG_DATA(f, NULL);
	free(fmd);
	fio_file_clear_partial_mmap(f);

	return generic_close_file(td, f);
}
255 static int fio_mmapio_invalidate(struct thread_data *td, struct fio_file *f)
257 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
260 ret = posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, POSIX_MADV_DONTNEED);
262 if (f->filetype == FIO_TYPE_BD)
263 (void) posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, FIO_MADV_FREE);
269 static struct ioengine_ops ioengine = {
271 .version = FIO_IOOPS_VERSION,
272 .init = fio_mmapio_init,
273 .prep = fio_mmapio_prep,
274 .queue = fio_mmapio_queue,
275 .open_file = fio_mmapio_open_file,
276 .close_file = fio_mmapio_close_file,
277 .invalidate = fio_mmapio_invalidate,
278 .get_file_size = generic_get_file_size,
279 .flags = FIO_SYNCIO | FIO_NOEXTEND,
282 static void fio_init fio_mmapio_register(void)
284 register_ioengine(&ioengine);
287 static void fio_exit fio_mmapio_unregister(void)
289 unregister_ioengine(&ioengine);