If verify is enabled, automatically enable refill_buffers
fio.git: engines/sync.c
/*
 * sync/psync engine
 *
 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
 * data and IO engine that does regular pread(2)/pwrite(2) to transfer data.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"

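/*
 * Example usage (illustrative only, not part of this source): a minimal fio
 * job file exercising these engines might look like
 *
 *	[seq-write]
 *	ioengine=psync
 *	rw=write
 *	bs=4k
 *	size=128m
 *
 * Substituting ioengine=sync or ioengine=vsync selects the lseek+read/write
 * or the batched readv/writev variants registered at the bottom of this file.
 */
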
struct syncio_data {
	struct iovec *iovecs;
	struct io_u **io_us;
	unsigned int queued;
	unsigned long queued_bytes;

	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;
};

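/*
 * For the plain "sync" engine: position the file descriptor at the io_u's
 * offset before the read(2)/write(2) issued from fio_syncio_queue().
 * DDIR_SYNC requests need no positioning.
 */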
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_SYNC)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

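/*
 * Translate the return value of a read/write/fsync call into io_u state:
 * a short transfer is recorded as residual bytes, a negative return as an
 * errno-based error. Either way the io_u completes synchronously.
 */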
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error)
		td_verror(td, io_u->error, "xfer");

	return FIO_Q_COMPLETED;
}

static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else
		ret = fsync(f->fd);

	return fio_io_end(td, io_u, ret);
}

static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else
		ret = fsync(f->fd);

	return fio_io_end(td, io_u, ret);
}

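/*
 * The "vsync" engine batches contiguous io_us and submits them with a single
 * readv(2)/writev(2) in fio_vsyncio_commit(). Batched requests are returned
 * as FIO_Q_QUEUED, and getevents()/event() below hand the io_us back to fio
 * once the commit has run.
 */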
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops->data;
	int ret;

	if (min) {
		ret = sd->queued;
		sd->queued = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops->data;

	return sd->io_us[event];
}

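/*
 * An io_u may join the current batch only if it continues the previous one:
 * same file, same data direction, and an offset that starts exactly where
 * the last queued io_u ended (tracked in sd->last_offset).
 */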
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	if (io_u->ddir == DDIR_SYNC)
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}

static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int index)
{
	sd->io_us[index] = io_u;
	sd->iovecs[index].iov_base = io_u->xfer_buf;
	sd->iovecs[index].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}

static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		if (io_u->ddir == DDIR_SYNC) {
			int ret = fsync(io_u->file->fd);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}

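/*
 * Submit the current batch: seek to the offset of the first queued io_u,
 * issue one readv(2) or writev(2) covering all queued iovecs, and let
 * fio_vsyncio_end() distribute the result across the individual io_us.
 */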
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	return fio_vsyncio_end(td, ret);
}

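/*
 * The iovec and io_u arrays are sized to td->o.iodepth here, and
 * fio_vsyncio_queue() returns FIO_Q_BUSY once sd->queued reaches that depth,
 * so the arrays are never overrun.
 */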
static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

	td->io_ops->data = sd;
	return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;

	free(sd->iovecs);
	free(sd->io_us);
	free(sd);
	td->io_ops->data = NULL;
}

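/*
 * Three engines are registered from this file: "sync" (lseek + read/write,
 * using the prep hook), "psync" (pread/pwrite, no prep needed) and "vsync"
 * (batched readv/writev via the queue/commit/getevents hooks above).
 */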
static struct ioengine_ops ioengine_rw = {
	.name = "sync",
	.version = FIO_IOOPS_VERSION,
	.prep = fio_syncio_prep,
	.queue = fio_syncio_queue,
	.open_file = generic_open_file,
	.close_file = generic_close_file,
	.flags = FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
	.name = "psync",
	.version = FIO_IOOPS_VERSION,
	.queue = fio_psyncio_queue,
	.open_file = generic_open_file,
	.close_file = generic_close_file,
	.flags = FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
	.name = "vsync",
	.version = FIO_IOOPS_VERSION,
	.init = fio_vsyncio_init,
	.cleanup = fio_vsyncio_cleanup,
	.queue = fio_vsyncio_queue,
	.commit = fio_vsyncio_commit,
	.event = fio_vsyncio_event,
	.getevents = fio_vsyncio_getevents,
	.open_file = generic_open_file,
	.close_file = generic_close_file,
	.flags = FIO_SYNCIO,
};

static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
}

static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
}