Full readonly check
[fio.git] / engines / mmap.c
... / ...
CommitLineData
1/*
2 * mmap engine
3 *
4 * IO engine that reads/writes from files by doing memcpy to/from
5 * a memory mapped region of the file.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <errno.h>
12#include <assert.h>
13#include <sys/mman.h>
14
15#include "../fio.h"
16
17static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
18{
19 struct fio_file *f = io_u->file;
20 unsigned long long real_off = io_u->offset - f->file_offset;
21
22 fio_ro_check(td, io_u);
23
24 if (io_u->ddir == DDIR_READ)
25 memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
26 else if (io_u->ddir == DDIR_WRITE)
27 memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
28 else if (io_u->ddir == DDIR_SYNC) {
29 size_t len = (f->io_size + page_size - 1) & ~page_mask;
30
31 if (msync(f->mmap, len, MS_SYNC)) {
32 io_u->error = errno;
33 td_verror(td, io_u->error, "msync");
34 }
35 }
36
37 /*
38 * not really direct, but should drop the pages from the cache
39 */
40 if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
41 size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
42 unsigned long long off = real_off & ~page_mask;
43
44 if (msync(f->mmap + off, len, MS_SYNC) < 0) {
45 io_u->error = errno;
46 td_verror(td, io_u->error, "msync");
47 }
48 if (madvise(f->mmap + off, len, MADV_DONTNEED) < 0) {
49 io_u->error = errno;
50 td_verror(td, io_u->error, "madvise");
51 }
52 }
53
54 return FIO_Q_COMPLETED;
55}
56
57static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
58{
59 int ret, flags;
60
61 ret = generic_open_file(td, f);
62 if (ret)
63 return ret;
64
65 /*
66 * for size checkup, don't mmap anything.
67 */
68 if (!f->io_size)
69 return 0;
70
71 if (td_rw(td))
72 flags = PROT_READ | PROT_WRITE;
73 else if (td_write(td)) {
74 flags = PROT_WRITE;
75
76 if (td->o.verify != VERIFY_NONE)
77 flags |= PROT_READ;
78 } else
79 flags = PROT_READ;
80
81 f->mmap = mmap(NULL, f->io_size, flags, MAP_SHARED, f->fd, f->file_offset);
82 if (f->mmap == MAP_FAILED) {
83 f->mmap = NULL;
84 td_verror(td, errno, "mmap");
85 goto err;
86 }
87
88 if (file_invalidate_cache(td, f))
89 goto err;
90
91 if (!td_random(td)) {
92 if (madvise(f->mmap, f->io_size, MADV_SEQUENTIAL) < 0) {
93 td_verror(td, errno, "madvise");
94 goto err;
95 }
96 } else {
97 if (madvise(f->mmap, f->io_size, MADV_RANDOM) < 0) {
98 td_verror(td, errno, "madvise");
99 goto err;
100 }
101 }
102
103 return 0;
104
105err:
106 td->io_ops->close_file(td, f);
107 return 1;
108}
109
110static void fio_mmapio_close(struct thread_data fio_unused *td,
111 struct fio_file *f)
112{
113 if (f->mmap) {
114 munmap(f->mmap, f->io_size);
115 f->mmap = NULL;
116 }
117 generic_close_file(td, f);
118}
119
/*
 * Engine descriptor. FIO_SYNCIO: queue() completes IO inline.
 * FIO_NOEXTEND: the mapping is created at open time with the current
 * file size, so the engine cannot grow files.
 */
static struct ioengine_ops ioengine = {
	.name		= "mmap",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_mmapio_queue,
	.open_file	= fio_mmapio_open,
	.close_file	= fio_mmapio_close,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};
128
/* Runs at load time (fio_init constructor): make the engine selectable. */
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}
133
/* Runs at unload time (fio_exit destructor): remove the engine again. */
static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}