Split mutex.c and .h each into three files
[fio.git] / engines / mmap.c
... / ...
CommitLineData
1/*
2 * mmap engine
3 *
4 * IO engine that reads/writes from files by doing memcpy to/from
5 * a memory mapped region of the file.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <sys/mman.h>
13
14#include "../fio.h"
15#include "../verify.h"
16
17/*
18 * Limits us to 1GiB of mapped files in total
19 */
20#define MMAP_TOTAL_SZ (1 * 1024 * 1024 * 1024UL)
21
22static unsigned long mmap_map_size;
23
/*
 * Per-file engine state: describes the currently mapped window of the file.
 * Attached to the fio_file via FILE_SET_ENG_DATA() in open_file.
 */
struct fio_mmap_data {
	void *mmap_ptr;		/* base address of current mapping, NULL if none */
	size_t mmap_sz;		/* length of the mapping in bytes */
	off_t mmap_off;		/* file offset the mapping starts at */
};
29
30static bool fio_madvise_file(struct thread_data *td, struct fio_file *f,
31 size_t length)
32
33{
34 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
35
36 if (!td->o.fadvise_hint)
37 return true;
38
39 if (!td_random(td)) {
40 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_SEQUENTIAL) < 0) {
41 td_verror(td, errno, "madvise");
42 return false;
43 }
44 } else {
45 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_RANDOM) < 0) {
46 td_verror(td, errno, "madvise");
47 return false;
48 }
49 }
50
51 return true;
52}
53
/*
 * mmap() the file region [off, off + length) with protection flags derived
 * from the job's data direction, then apply madvise hints. On success the
 * mapping base is stored in fmd->mmap_ptr; on failure any partial mapping
 * is torn down. Returns td->error (0 on success).
 */
static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
			 size_t length, off_t off)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
	int flags = 0;

	/*
	 * Choose mmap protection to match what the job will do with the
	 * region. A write job still needs PROT_READ when verification is
	 * enabled, since verify reads the data back through the mapping.
	 */
	if (td_rw(td) && !td->o.verify_only)
		flags = PROT_READ | PROT_WRITE;
	else if (td_write(td) && !td->o.verify_only) {
		flags = PROT_WRITE;

		if (td->o.verify != VERIFY_NONE)
			flags |= PROT_READ;
	} else
		flags = PROT_READ;

	fmd->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off);
	if (fmd->mmap_ptr == MAP_FAILED) {
		/* Reset so the err path / later preps see "no mapping" */
		fmd->mmap_ptr = NULL;
		td_verror(td, errno, "mmap");
		goto err;
	}

	/* Sequential/random access-pattern hint, if enabled */
	if (!fio_madvise_file(td, f, length))
		goto err;

	/* Tell the kernel we don't need any currently cached pages */
	if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_DONTNEED) < 0) {
		td_verror(td, errno, "madvise");
		goto err;
	}

#ifdef FIO_MADV_FREE
	/* Best-effort hint on block devices; failure deliberately ignored */
	if (f->filetype == FIO_TYPE_BLOCK)
		(void) posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, FIO_MADV_FREE);
#endif

err:
	/*
	 * The success path falls through to here as well; only unmap when
	 * an error was recorded. NOTE(review): this tests td->error, which
	 * could in principle have been set by an earlier unrelated failure
	 * — confirm that td->error is clear on entry in practice.
	 */
	if (td->error && fmd->mmap_ptr)
		munmap(fmd->mmap_ptr, length);

	return td->error;
}
96
97/*
98 * Just mmap an appropriate portion, we cannot mmap the full extent
99 */
100static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
101{
102 struct fio_file *f = io_u->file;
103 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
104
105 if (io_u->buflen > mmap_map_size) {
106 log_err("fio: bs too big for mmap engine\n");
107 return EIO;
108 }
109
110 fmd->mmap_sz = mmap_map_size;
111 if (fmd->mmap_sz > f->io_size)
112 fmd->mmap_sz = f->io_size;
113
114 fmd->mmap_off = io_u->offset;
115
116 return fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
117}
118
119/*
120 * Attempt to mmap the entire file
121 */
122static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
123{
124 struct fio_file *f = io_u->file;
125 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
126 int ret;
127
128 if (fio_file_partial_mmap(f))
129 return EINVAL;
130 if (io_u->offset != (size_t) io_u->offset ||
131 f->io_size != (size_t) f->io_size) {
132 fio_file_set_partial_mmap(f);
133 return EINVAL;
134 }
135
136 fmd->mmap_sz = f->io_size;
137 fmd->mmap_off = 0;
138
139 ret = fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
140 if (ret)
141 fio_file_set_partial_mmap(f);
142
143 return ret;
144}
145
/*
 * Per-I/O prep hook: ensure the region this io_u touches is covered by
 * the current mapping, remapping if needed (full file first, falling
 * back to a limited window). On success, io_u->mmap_data points at the
 * in-mapping address for this I/O.
 */
static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
	int ret;

	/*
	 * It fits within existing mapping, use it
	 */
	if (io_u->offset >= fmd->mmap_off &&
	    io_u->offset + io_u->buflen <= fmd->mmap_off + fmd->mmap_sz)
		goto done;

	/*
	 * unmap any existing mapping
	 */
	if (fmd->mmap_ptr) {
		if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0)
			return errno;
		fmd->mmap_ptr = NULL;
	}

	/* Prefer the full-file mapping; fall back to a window on failure */
	if (fio_mmapio_prep_full(td, io_u)) {
		/* Clear the error recorded by the failed full map attempt */
		td_clear_error(td);
		ret = fio_mmapio_prep_limited(td, io_u);
		if (ret)
			return ret;
	}

done:
	/*
	 * Translate the file offset into an address inside the mapping.
	 * NOTE(review): arithmetic on a void * relies on the GCC extension
	 * treating sizeof(void) as 1; the f->file_offset subtraction
	 * presumably accounts for mappings made relative to the file start
	 * — confirm against fio_mmap_file() callers.
	 */
	io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off -
				f->file_offset;
	return 0;
}
180
181static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
182{
183 struct fio_file *f = io_u->file;
184 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
185
186 fio_ro_check(td, io_u);
187
188 if (io_u->ddir == DDIR_READ)
189 memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
190 else if (io_u->ddir == DDIR_WRITE)
191 memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
192 else if (ddir_sync(io_u->ddir)) {
193 if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
194 io_u->error = errno;
195 td_verror(td, io_u->error, "msync");
196 }
197 } else if (io_u->ddir == DDIR_TRIM) {
198 int ret = do_io_u_trim(td, io_u);
199
200 if (!ret)
201 td_verror(td, io_u->error, "trim");
202 }
203
204
205 /*
206 * not really direct, but should drop the pages from the cache
207 */
208 if (td->o.odirect && ddir_rw(io_u->ddir)) {
209 if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
210 io_u->error = errno;
211 td_verror(td, io_u->error, "msync");
212 }
213 if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
214 io_u->error = errno;
215 td_verror(td, io_u->error, "madvise");
216 }
217 }
218
219 return FIO_Q_COMPLETED;
220}
221
222static int fio_mmapio_init(struct thread_data *td)
223{
224 struct thread_options *o = &td->o;
225
226 if ((o->rw_min_bs & page_mask) &&
227 (o->odirect || o->fsync_blocks || o->fdatasync_blocks)) {
228 log_err("fio: mmap options dictate a minimum block size of "
229 "%llu bytes\n", (unsigned long long) page_size);
230 return 1;
231 }
232
233 mmap_map_size = MMAP_TOTAL_SZ / o->nr_files;
234 return 0;
235}
236
237static int fio_mmapio_open_file(struct thread_data *td, struct fio_file *f)
238{
239 struct fio_mmap_data *fmd;
240 int ret;
241
242 ret = generic_open_file(td, f);
243 if (ret)
244 return ret;
245
246 fmd = calloc(1, sizeof(*fmd));
247 if (!fmd) {
248 int fio_unused __ret;
249 __ret = generic_close_file(td, f);
250 return 1;
251 }
252
253 FILE_SET_ENG_DATA(f, fmd);
254 return 0;
255}
256
/*
 * Release the per-file engine state, clear the partial-mmap flag, and
 * close the file via the generic helper. NOTE(review): a still-active
 * mapping is not munmap'ed here — confirm that is intended (process
 * teardown reclaims it).
 */
static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	fio_file_clear_partial_mmap(f);
	FILE_SET_ENG_DATA(f, NULL);
	free(fmd);

	return generic_close_file(td, f);
}
267
/*
 * Engine ops table. FIO_SYNCIO: queue() completes I/O inline.
 * FIO_NOEXTEND: this engine does not grow files.
 */
static struct ioengine_ops ioengine = {
	.name		= "mmap",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_mmapio_init,
	.prep		= fio_mmapio_prep,
	.queue		= fio_mmapio_queue,
	.open_file	= fio_mmapio_open_file,
	.close_file	= fio_mmapio_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};
279
/* Constructor hook: register this engine with fio core at load time */
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}
284
/* Destructor hook: unregister the engine on unload */
static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}