fio.git — engines/mmap.c (exported from gitweb; per-line commit data stripped).
Associated commit title: "Need to clear ->files_size when ->files is cleared".
1/*
2 * mmap engine
3 *
4 * IO engine that reads/writes from files by doing memcpy to/from
5 * a memory mapped region of the file.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <sys/mman.h>
13
14#include "../fio.h"
15
16static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
17{
18 struct fio_file *f = io_u->file;
19 unsigned long long real_off = io_u->offset - f->file_offset;
20
21 fio_ro_check(td, io_u);
22
23 if (io_u->ddir == DDIR_READ)
24 memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
25 else if (io_u->ddir == DDIR_WRITE)
26 memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
27 else if (io_u->ddir == DDIR_SYNC) {
28 size_t len = (f->io_size + page_size - 1) & ~page_mask;
29
30 if (msync(f->mmap, len, MS_SYNC)) {
31 io_u->error = errno;
32 td_verror(td, io_u->error, "msync");
33 }
34 }
35
36 /*
37 * not really direct, but should drop the pages from the cache
38 */
39 if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
40 size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
41 unsigned long long off = real_off & ~page_mask;
42
43 if (msync(f->mmap + off, len, MS_SYNC) < 0) {
44 io_u->error = errno;
45 td_verror(td, io_u->error, "msync");
46 }
47 if (madvise(f->mmap + off, len, MADV_DONTNEED) < 0) {
48 io_u->error = errno;
49 td_verror(td, io_u->error, "madvise");
50 }
51 }
52
53 return FIO_Q_COMPLETED;
54}
55
56static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
57{
58 int ret, flags;
59
60 ret = generic_open_file(td, f);
61 if (ret)
62 return ret;
63
64 /*
65 * for size checkup, don't mmap anything.
66 */
67 if (!f->io_size)
68 return 0;
69
70 if (td_rw(td))
71 flags = PROT_READ | PROT_WRITE;
72 else if (td_write(td)) {
73 flags = PROT_WRITE;
74
75 if (td->o.verify != VERIFY_NONE)
76 flags |= PROT_READ;
77 } else
78 flags = PROT_READ;
79
80 f->mmap = mmap(NULL, f->io_size, flags, MAP_SHARED, f->fd, f->file_offset);
81 if (f->mmap == MAP_FAILED) {
82 f->mmap = NULL;
83 td_verror(td, errno, "mmap");
84 goto err;
85 }
86
87 if (file_invalidate_cache(td, f))
88 goto err;
89
90 if (!td_random(td)) {
91 if (madvise(f->mmap, f->io_size, MADV_SEQUENTIAL) < 0) {
92 td_verror(td, errno, "madvise");
93 goto err;
94 }
95 } else {
96 if (madvise(f->mmap, f->io_size, MADV_RANDOM) < 0) {
97 td_verror(td, errno, "madvise");
98 goto err;
99 }
100 }
101
102 return 0;
103
104err:
105 td->io_ops->close_file(td, f);
106 return 1;
107}
108
109static int fio_mmapio_close(struct thread_data fio_unused *td,
110 struct fio_file *f)
111{
112 int ret = 0, ret2;
113
114 if (f->mmap) {
115 if (munmap(f->mmap, f->io_size) < 0)
116 ret = errno;
117
118 f->mmap = NULL;
119 }
120
121 ret2 = generic_close_file(td, f);
122 if (!ret && ret2)
123 ret = ret2;
124
125 return ret;
126}
127
/*
 * Engine descriptor: synchronous engine (queue completes inline) that
 * must not extend files, since the region is mapped up front at open
 * time with the file's existing size.
 */
static struct ioengine_ops ioengine = {
	.name		= "mmap",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_mmapio_queue,
	.open_file	= fio_mmapio_open,
	.close_file	= fio_mmapio_close,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};
136
/* Constructor hook: register this engine with fio at program start. */
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}
141
/* Destructor hook: unregister this engine at program exit. */
static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}