mmap engine: make sure that page unaligned syncs work
[fio.git] / engines / mmap.c
CommitLineData
2866c82d
JA
/*
 * mmap engine: memory-mapped synchronous read/write io engine
 *
 */
5#include <stdio.h>
6#include <stdlib.h>
7#include <unistd.h>
8#include <errno.h>
9#include <assert.h>
10#include <sys/mman.h>
5f350952
JA
11
12#include "../fio.h"
13#include "../os.h"
2866c82d 14
2866c82d
JA
15static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
16{
53cdc686
JA
17 struct fio_file *f = io_u->file;
18 unsigned long long real_off = io_u->offset - f->file_offset;
2866c82d
JA
19
20 if (io_u->ddir == DDIR_READ)
cec6b55d 21 memcpy(io_u->xfer_buf, f->mmap + real_off, io_u->xfer_buflen);
87dc1ab1 22 else if (io_u->ddir == DDIR_WRITE)
cec6b55d 23 memcpy(f->mmap + real_off, io_u->xfer_buf, io_u->xfer_buflen);
b907a5b5 24 else if (io_u->ddir == DDIR_SYNC) {
cfc99db7
JA
25 size_t len = (f->file_size + page_size - 1) & ~page_mask;
26
27 if (msync(f->mmap, len, MS_SYNC)) {
b907a5b5 28 io_u->error = errno;
cfc99db7
JA
29 td_verror(td, io_u->error, "msync");
30 }
b907a5b5 31 }
2866c82d
JA
32
33 /*
34 * not really direct, but should drop the pages from the cache
35 */
b907a5b5 36 if (td->odirect && io_u->ddir != DDIR_SYNC) {
cfc99db7
JA
37 size_t len = (io_u->xfer_buflen + page_size - 1) & ~page_mask;
38 unsigned long long off = real_off & ~page_mask;
39
40 if (msync(f->mmap + off, len, MS_SYNC) < 0) {
2866c82d 41 io_u->error = errno;
cfc99db7
JA
42 td_verror(td, io_u->error, "msync");
43 }
44 if (madvise(f->mmap + off, len, MADV_DONTNEED) < 0) {
2866c82d 45 io_u->error = errno;
cfc99db7
JA
46 td_verror(td, io_u->error, "madvise");
47 }
2866c82d
JA
48 }
49
36167d82 50 return FIO_Q_COMPLETED;
2866c82d
JA
51}
52
b5af8293
JA
53static int fio_mmapio_open(struct thread_data *td, struct fio_file *f)
54{
55 int ret, flags;
56
57 ret = generic_open_file(td, f);
58 if (ret)
59 return ret;
60
61 if (td_rw(td))
62 flags = PROT_READ | PROT_WRITE;
63 else if (td_write(td)) {
64 flags = PROT_WRITE;
65
66 if (td->verify != VERIFY_NONE)
67 flags |= PROT_READ;
68 } else
69 flags = PROT_READ;
70
71 f->mmap = mmap(NULL, f->file_size, flags, MAP_SHARED, f->fd, f->file_offset);
72 if (f->mmap == MAP_FAILED) {
73 f->mmap = NULL;
74 td_verror(td, errno, "mmap");
75 goto err;
76 }
77
78 if (file_invalidate_cache(td, f))
79 goto err;
80
81 if (!td_random(td)) {
82 if (madvise(f->mmap, f->file_size, MADV_SEQUENTIAL) < 0) {
83 td_verror(td, errno, "madvise");
84 goto err;
85 }
86 } else {
87 if (madvise(f->mmap, f->file_size, MADV_RANDOM) < 0) {
88 td_verror(td, errno, "madvise");
89 goto err;
90 }
91 }
92
93 return 0;
94
95err:
0263882a 96 td->io_ops->close_file(td, f);
b5af8293
JA
97 return 1;
98}
99
100static void fio_mmapio_close(struct thread_data fio_unused *td,
101 struct fio_file *f)
102{
103 if (f->mmap) {
104 munmap(f->mmap, f->file_size);
105 f->mmap = NULL;
106 }
0263882a 107 generic_close_file(td, f);
b5af8293
JA
108}
109
5f350952 110static struct ioengine_ops ioengine = {
2866c82d
JA
111 .name = "mmap",
112 .version = FIO_IOOPS_VERSION,
2866c82d 113 .queue = fio_mmapio_queue,
b5af8293
JA
114 .open_file = fio_mmapio_open,
115 .close_file = fio_mmapio_close,
0263882a 116 .flags = FIO_SYNCIO | FIO_NOEXTEND,
2866c82d 117};
5f350952
JA
118
/* Auto-register this engine at program load (fio_init = constructor). */
static void fio_init fio_mmapio_register(void)
{
	register_ioengine(&ioengine);
}
123
/* Auto-unregister this engine at program exit (fio_exit = destructor). */
static void fio_exit fio_mmapio_unregister(void)
{
	unregister_ioengine(&ioengine);
}