Fix filling verify pattern for byte sizes of 3, 5, 7, ...
[fio.git] / engines / sync.c
/*
 * sync/psync/vsync engines
 *
 * IO engines that transfer data with regular read(2)/write(2) after an
 * lseek(2) to the IO offset (sync), with pread(2)/pwrite(2) at an explicit
 * offset (psync), and with readv(2)/writev(2) over batches of contiguous
 * requests (vsync).
 */
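/*
 * Illustrative job file usage (the option names are standard fio job
 * options, the descriptions are examples only):
 *
 *	ioengine=sync	- read(2)/write(2) after an lseek(2) to the offset
 *	ioengine=psync	- pread(2)/pwrite(2) at an explicit offset
 *	ioengine=vsync	- readv(2)/writev(2) over batched contiguous
 *			  requests, up to iodepth entries per call
 */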
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>
#include <assert.h>

#include "../fio.h"

/*
 * Sync engine uses engine_data to store the last offset, so that sequential
 * IO can skip a redundant lseek(2) in ->prep().
 */
#define LAST_POS(f)	((f)->engine_data)
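/*
 * Example: if a 4k read at offset 0 has just completed, LAST_POS(f) is 4096
 * and a following sequential request at offset 4096 can skip the lseek(2)
 * in fio_syncio_prep() below.
 */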
struct syncio_data {
	struct iovec *iovecs;		/* one iovec per batched io_u (vsync) */
	struct io_u **io_us;		/* the io_us backing those iovecs */
	unsigned int queued;		/* requests in the current batch */
	unsigned int events;		/* completed, not yet reaped events */
	unsigned long queued_bytes;	/* total bytes in the current batch */

	unsigned long long last_offset;	/* end offset of the last batched io_u */
	struct fio_file *last_file;
	enum fio_ddir last_ddir;
};

static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error)
		td_verror(td, io_u->error, "xfer");

	return FIO_Q_COMPLETED;
}
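/*
 * Note on fio_io_end(): a short transfer (0 <= ret < xfer_buflen) is not
 * flagged as an error here; the remainder is reported via io_u->resid and
 * left for the fio core to handle.
 */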

static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops->data;
	int ret;

	if (min) {
		ret = sd->events;
		sd->events = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops->data;

	return sd->io_us[event];
}

static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	if (ddir_sync(io_u->ddir))
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}
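/*
 * Example of the append rule in fio_vsyncio_append(): if the previous 4k
 * write to a file ended at offset 8192, a new 4k write to the same file at
 * offset 8192 joins the current batch; a read, a different file, or a
 * non-contiguous offset starts a new batch instead.
 */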

static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int idx)
{
	sd->io_us[idx] = io_u;
	sd->iovecs[idx].iov_base = io_u->xfer_buf;
	sd->iovecs[idx].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}

static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}
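/*
 * How the batch is driven (a sketch of the core/engine interaction): the
 * fio core keeps calling ->queue(); contiguous requests return FIO_Q_QUEUED
 * and accumulate up to iodepth entries. When FIO_Q_BUSY is returned, the
 * core calls ->commit() to flush the batch with one readv(2)/writev(2),
 * reaps the completions via ->getevents()/->event(), and then retries the
 * busy io_u.
 */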

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}
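/*
 * Worked example for fio_vsyncio_end(): with three 4k requests queued
 * (queued_bytes = 12288) and writev(2) returning 10240, the first two io_us
 * get resid 0, the third gets resid = 4096 - 2048 = 2048, and the function
 * returns 0: the short transfer is reported per io_u rather than as a hard
 * error.
 */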

static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	sd->events = sd->queued;
	sd->queued = 0;
	return fio_vsyncio_end(td, ret);
}

static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

	td->io_ops->data = sd;
	return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;

	free(sd->iovecs);
	free(sd->io_us);
	free(sd);
}

static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
}

static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
}