Makefile: Fix android compilation
[fio.git] / engines / mmap.c
... / ...
CommitLineData
1/*
2 * mmap engine
3 *
4 * IO engine that reads/writes from files by doing memcpy to/from
5 * a memory mapped region of the file.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <errno.h>
11#include <sys/mman.h>
12
13#include "../fio.h"
14#include "../optgroup.h"
15#include "../verify.h"
16
17/*
18 * Limits us to 1GiB of mapped files in total
19 */
20#define MMAP_TOTAL_SZ (1 * 1024 * 1024 * 1024UL)
21
/*
 * Per-file cap (in bytes) on how much of a file is mapped at once;
 * computed in fio_mmapio_init() as MMAP_TOTAL_SZ / nr_files.
 */
static unsigned long mmap_map_size;

/* Per-file engine state: describes the currently mapped window of the file */
struct fio_mmap_data {
	void *mmap_ptr;		/* base of the current mapping, NULL if unmapped */
	size_t mmap_sz;		/* length of the current mapping */
	off_t mmap_off;		/* file offset where the mapping starts */
};
29
#ifdef CONFIG_HAVE_THP
/* Engine-private options (reached through td->eo), THP builds only */
struct mmap_options {
	void *pad;		/* NOTE(review): leading pad pointer appears to be a fio option-struct convention — confirm */
	unsigned int thp;	/* non-zero: madvise(MADV_HUGEPAGE) new mappings */
};

static struct fio_option options[] = {
	{
		.name = "thp",
		.lname = "Transparent Huge Pages",
		.type = FIO_OPT_INT,
		.off1 = offsetof(struct mmap_options, thp),
		.help = "Memory Advise Huge Page",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_MMAP,
	},
	{
		.name = NULL,	/* terminator */
	},
};
#endif
51
52static bool fio_madvise_file(struct thread_data *td, struct fio_file *f,
53 size_t length)
54
55{
56 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
57#ifdef CONFIG_HAVE_THP
58 struct mmap_options *o = td->eo;
59
60 /* Ignore errors on this optional advisory */
61 if (o->thp)
62 madvise(fmd->mmap_ptr, length, MADV_HUGEPAGE);
63#endif
64
65 if (!td->o.fadvise_hint)
66 return true;
67
68 if (!td_random(td)) {
69 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_SEQUENTIAL) < 0) {
70 td_verror(td, errno, "madvise");
71 return false;
72 }
73 } else {
74 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_RANDOM) < 0) {
75 td_verror(td, errno, "madvise");
76 return false;
77 }
78 }
79
80 return true;
81}
82
/*
 * Choose the mmap sharing mode. THP advice requires a private mapping,
 * so MAP_PRIVATE is used when the thp option is set; otherwise the file
 * is mapped MAP_SHARED.
 */
static int fio_mmap_get_shared(struct thread_data *td)
{
#ifdef CONFIG_HAVE_THP
	struct mmap_options *o = td->eo;

	if (o->thp)
		return MAP_PRIVATE;
#endif
	return MAP_SHARED;
}
98
99static int fio_mmap_file(struct thread_data *td, struct fio_file *f,
100 size_t length, off_t off)
101{
102 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
103 int flags = 0, shared = fio_mmap_get_shared(td);
104
105 if (td_rw(td) && !td->o.verify_only)
106 flags = PROT_READ | PROT_WRITE;
107 else if (td_write(td) && !td->o.verify_only) {
108 flags = PROT_WRITE;
109
110 if (td->o.verify != VERIFY_NONE)
111 flags |= PROT_READ;
112 } else
113 flags = PROT_READ;
114
115 fmd->mmap_ptr = mmap(NULL, length, flags, shared, f->fd, off);
116 if (fmd->mmap_ptr == MAP_FAILED) {
117 fmd->mmap_ptr = NULL;
118 td_verror(td, errno, "mmap");
119 goto err;
120 }
121
122 if (!fio_madvise_file(td, f, length))
123 goto err;
124
125 if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_DONTNEED) < 0) {
126 td_verror(td, errno, "madvise");
127 goto err;
128 }
129
130#ifdef FIO_MADV_FREE
131 if (f->filetype == FIO_TYPE_BLOCK)
132 (void) posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, FIO_MADV_FREE);
133#endif
134
135err:
136 if (td->error && fmd->mmap_ptr)
137 munmap(fmd->mmap_ptr, length);
138
139 return td->error;
140}
141
142/*
143 * Just mmap an appropriate portion, we cannot mmap the full extent
144 */
145static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u)
146{
147 struct fio_file *f = io_u->file;
148 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
149
150 if (io_u->buflen > mmap_map_size) {
151 log_err("fio: bs too big for mmap engine\n");
152 return EIO;
153 }
154
155 fmd->mmap_sz = mmap_map_size;
156 if (fmd->mmap_sz > f->io_size)
157 fmd->mmap_sz = f->io_size;
158
159 fmd->mmap_off = io_u->offset;
160
161 return fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
162}
163
164/*
165 * Attempt to mmap the entire file
166 */
167static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u)
168{
169 struct fio_file *f = io_u->file;
170 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
171 int ret;
172
173 if (fio_file_partial_mmap(f))
174 return EINVAL;
175 if (io_u->offset != (size_t) io_u->offset ||
176 f->io_size != (size_t) f->io_size) {
177 fio_file_set_partial_mmap(f);
178 return EINVAL;
179 }
180
181 fmd->mmap_sz = f->io_size;
182 fmd->mmap_off = 0;
183
184 ret = fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off);
185 if (ret)
186 fio_file_set_partial_mmap(f);
187
188 return ret;
189}
190
/*
 * Per-io_u prep: make sure the io_u's [offset, offset+buflen) range is
 * covered by a mapping, remapping if needed, and point io_u->mmap_data
 * at the right spot inside it. Returns 0 on success, an errno value on
 * failure.
 */
static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
	int ret;

	/*
	 * It fits within existing mapping, use it
	 */
	if (io_u->offset >= fmd->mmap_off &&
	    io_u->offset + io_u->buflen <= fmd->mmap_off + fmd->mmap_sz)
		goto done;

	/*
	 * unmap any existing mapping
	 */
	if (fmd->mmap_ptr) {
		if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0)
			return errno;
		fmd->mmap_ptr = NULL;
	}

	/* try a full-file mapping first, fall back to a limited window */
	if (fio_mmapio_prep_full(td, io_u)) {
		td_clear_error(td);
		ret = fio_mmapio_prep_limited(td, io_u);
		if (ret)
			return ret;
	}

done:
	/*
	 * NOTE(review): offsets are relative to the mapping start, then
	 * shifted back by f->file_offset — assumes mmap_off already
	 * includes the file offset; confirm against fio_mmap_file callers.
	 */
	io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off -
				f->file_offset;
	return 0;
}
225
226static enum fio_q_status fio_mmapio_queue(struct thread_data *td,
227 struct io_u *io_u)
228{
229 struct fio_file *f = io_u->file;
230 struct fio_mmap_data *fmd = FILE_ENG_DATA(f);
231
232 fio_ro_check(td, io_u);
233
234 if (io_u->ddir == DDIR_READ)
235 memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
236 else if (io_u->ddir == DDIR_WRITE)
237 memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
238 else if (ddir_sync(io_u->ddir)) {
239 if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
240 io_u->error = errno;
241 td_verror(td, io_u->error, "msync");
242 }
243 } else if (io_u->ddir == DDIR_TRIM) {
244 int ret = do_io_u_trim(td, io_u);
245
246 if (!ret)
247 td_verror(td, io_u->error, "trim");
248 }
249
250
251 /*
252 * not really direct, but should drop the pages from the cache
253 */
254 if (td->o.odirect && ddir_rw(io_u->ddir)) {
255 if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
256 io_u->error = errno;
257 td_verror(td, io_u->error, "msync");
258 }
259 if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
260 io_u->error = errno;
261 td_verror(td, io_u->error, "madvise");
262 }
263 }
264
265 return FIO_Q_COMPLETED;
266}
267
268static int fio_mmapio_init(struct thread_data *td)
269{
270 struct thread_options *o = &td->o;
271
272 if ((o->rw_min_bs & page_mask) &&
273 (o->odirect || o->fsync_blocks || o->fdatasync_blocks)) {
274 log_err("fio: mmap options dictate a minimum block size of "
275 "%llu bytes\n", (unsigned long long) page_size);
276 return 1;
277 }
278
279 mmap_map_size = MMAP_TOTAL_SZ / o->nr_files;
280 return 0;
281}
282
283static int fio_mmapio_open_file(struct thread_data *td, struct fio_file *f)
284{
285 struct fio_mmap_data *fmd;
286 int ret;
287
288 ret = generic_open_file(td, f);
289 if (ret)
290 return ret;
291
292 fmd = calloc(1, sizeof(*fmd));
293 if (!fmd) {
294 int fio_unused __ret;
295 __ret = generic_close_file(td, f);
296 return 1;
297 }
298
299 FILE_SET_ENG_DATA(f, fmd);
300 return 0;
301}
302
/*
 * Detach and free the per-file engine state, clear the partial-mmap
 * flag, then close the file via the generic helper.
 */
static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	fio_file_clear_partial_mmap(f);
	FILE_SET_ENG_DATA(f, NULL);
	free(fmd);

	return generic_close_file(td, f);
}
313
/*
 * Engine registration table: a synchronous engine (FIO_SYNCIO) that
 * cannot extend files (FIO_NOEXTEND). THP builds also expose the
 * engine-private "thp" option.
 */
static struct ioengine_ops ioengine = {
	.name = "mmap",
	.version = FIO_IOOPS_VERSION,
	.init = fio_mmapio_init,
	.prep = fio_mmapio_prep,
	.queue = fio_mmapio_queue,
	.open_file = fio_mmapio_open_file,
	.close_file = fio_mmapio_close_file,
	.get_file_size = generic_get_file_size,
	.flags = FIO_SYNCIO | FIO_NOEXTEND,
#ifdef CONFIG_HAVE_THP
	.options = options,
	.option_struct_size = sizeof(struct mmap_options),
#endif
};
329
/* Register the engine at load time (fio_init marks a constructor) */
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}

/* Unregister at unload time (fio_exit marks a destructor) */
static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}