Commit: Add appropriate warning in mmap engine for large maps failing
Repository: fio.git — file: engines/mmap.c
1/*
2 * mmap engine
3 *
4 * IO engine that reads/writes from files by doing memcpy to/from
5 * a memory mapped region of the file.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <sys/mman.h>
13
14#include "../fio.h"
15
16static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
17{
18 struct fio_file *f = io_u->file;
19 unsigned long long real_off = io_u->offset - f->file_offset;
20
21 fio_ro_check(td, io_u);
22
23 if (io_u->ddir == DDIR_READ)
24 memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
25 else if (io_u->ddir == DDIR_WRITE)
26 memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
27 else if (io_u->ddir == DDIR_SYNC) {
28 size_t len = (f->io_size + page_size - 1) & ~page_mask;
29
30 if (msync(f->mmap, len, MS_SYNC)) {
31 io_u->error = errno;
32 td_verror(td, io_u->error, "msync");
33 }
34 }
35
36 /*
37 * not really direct, but should drop the pages from the cache
38 */
39 if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
40 size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
41 unsigned long long off = real_off & ~page_mask;
42
43 if (msync(f->mmap + off, len, MS_SYNC) < 0) {
44 io_u->error = errno;
45 td_verror(td, io_u->error, "msync");
46 }
47 if (madvise(f->mmap + off, len, MADV_DONTNEED) < 0) {
48 io_u->error = errno;
49 td_verror(td, io_u->error, "madvise");
50 }
51 }
52
53 return FIO_Q_COMPLETED;
54}
55
56static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
57{
58 int ret, flags;
59
60 ret = generic_open_file(td, f);
61 if (ret)
62 return ret;
63
64 /*
65 * for size checkup, don't mmap anything.
66 */
67 if (!f->io_size)
68 return 0;
69
70 if (td_rw(td))
71 flags = PROT_READ | PROT_WRITE;
72 else if (td_write(td)) {
73 flags = PROT_WRITE;
74
75 if (td->o.verify != VERIFY_NONE)
76 flags |= PROT_READ;
77 } else
78 flags = PROT_READ;
79
80 f->mmap = mmap(NULL, f->io_size, flags, MAP_SHARED, f->fd, f->file_offset);
81 if (f->mmap == MAP_FAILED) {
82 int err = errno;
83
84 f->mmap = NULL;
85 td_verror(td, err, "mmap");
86 if (err == EINVAL && f->io_size > 2*1024*1024*1024UL)
87 log_err("fio: mmap size likely too large\n");
88 goto err;
89 }
90
91 if (file_invalidate_cache(td, f))
92 goto err;
93
94 if (!td_random(td)) {
95 if (madvise(f->mmap, f->io_size, MADV_SEQUENTIAL) < 0) {
96 td_verror(td, errno, "madvise");
97 goto err;
98 }
99 } else {
100 if (madvise(f->mmap, f->io_size, MADV_RANDOM) < 0) {
101 td_verror(td, errno, "madvise");
102 goto err;
103 }
104 }
105
106 return 0;
107
108err:
109 td->io_ops->close_file(td, f);
110 return 1;
111}
112
113static int fio_mmapio_close(struct thread_data fio_unused *td,
114 struct fio_file *f)
115{
116 int ret = 0, ret2;
117
118 if (f->mmap) {
119 if (munmap(f->mmap, f->io_size) < 0)
120 ret = errno;
121
122 f->mmap = NULL;
123 }
124
125 ret2 = generic_close_file(td, f);
126 if (!ret && ret2)
127 ret = ret2;
128
129 return ret;
130}
131
132static struct ioengine_ops ioengine = {
133 .name = "mmap",
134 .version = FIO_IOOPS_VERSION,
135 .queue = fio_mmapio_queue,
136 .open_file = fio_mmapio_open,
137 .close_file = fio_mmapio_close,
138 .get_file_size = generic_get_file_size,
139 .flags = FIO_SYNCIO | FIO_NOEXTEND,
140};
141
/* Constructor hook: register this engine with fio at program startup. */
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}
146
/* Destructor hook: unregister the engine at program exit. */
static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}