Fio 1.17.1
[fio.git] / engines / mmap.c
/*
 * mmap engine
 *
 * IO engine that reads/writes from files by doing memcpy to/from
 * a memory mapped region of the file.
 *
 */
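/*
 * A minimal example job file for this engine. The option names are
 * standard fio job options; the job name and the sizes chosen here are
 * illustrative only:
 *
 *	[mmap-test]
 *	ioengine=mmap
 *	rw=randrw
 *	bs=4k
 *	size=128m
 */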
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <sys/mman.h>

#include "../fio.h"

static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	unsigned long long real_off = io_u->offset - f->file_offset;

	if (io_u->ddir == DDIR_READ)
		memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_SYNC) {
		/*
		 * Sync the entire mapping, with the length rounded up
		 * to a full page.
		 */
		size_t len = (f->io_size + page_size - 1) & ~page_mask;

		if (msync(f->mmap, len, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
	}

	/*
	 * Not really O_DIRECT, but msync + MADV_DONTNEED should drop
	 * the pages from the page cache.
	 */
	if (td->o.odirect && io_u->ddir != DDIR_SYNC) {
		size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
		unsigned long long off = real_off & ~page_mask;

		if (msync(f->mmap + off, len, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
		if (madvise(f->mmap + off, len, MADV_DONTNEED) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "madvise");
		}
	}

	return FIO_Q_COMPLETED;
}

static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
{
	int ret, flags;

	ret = generic_open_file(td, f);
	if (ret)
		return ret;

	/*
	 * If this open is only for a size check, there is nothing to mmap.
	 */
	if (!f->io_size)
		return 0;

	if (td_rw(td))
		flags = PROT_READ | PROT_WRITE;
	else if (td_write(td)) {
		flags = PROT_WRITE;

		if (td->o.verify != VERIFY_NONE)
			flags |= PROT_READ;
	} else
		flags = PROT_READ;

	f->mmap = mmap(NULL, f->io_size, flags, MAP_SHARED, f->fd,
			f->file_offset);
	if (f->mmap == MAP_FAILED) {
		f->mmap = NULL;
		td_verror(td, errno, "mmap");
		goto err;
	}

	if (file_invalidate_cache(td, f))
		goto err;

	if (!td_random(td)) {
		if (madvise(f->mmap, f->io_size, MADV_SEQUENTIAL) < 0) {
			td_verror(td, errno, "madvise");
			goto err;
		}
	} else {
		if (madvise(f->mmap, f->io_size, MADV_RANDOM) < 0) {
			td_verror(td, errno, "madvise");
			goto err;
		}
	}

	return 0;

err:
	td->io_ops->close_file(td, f);
	return 1;
}

static void fio_mmapio_close(struct thread_data *td, struct fio_file *f)
{
	if (f->mmap) {
		munmap(f->mmap, f->io_size);
		f->mmap = NULL;
	}
	generic_close_file(td, f);
}

static struct ioengine_ops ioengine = {
	.name		= "mmap",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_mmapio_queue,
	.open_file	= fio_mmapio_open,
	.close_file	= fio_mmapio_close,
	.flags		= FIO_SYNCIO | FIO_NOEXTEND,
};

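/*
 * fio_init and fio_exit expand to constructor/destructor attributes, so
 * the engine registers itself automatically when this code is loaded and
 * unregisters again on exit.
 */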
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}