/* fio — engines/sync.c (sync/psync/vsync/pvsync/pvsync2 I/O engines) */
1/*
2 * sync/psync engine
3 *
4 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
5 * data and IO engine that does regular pread(2)/pwrite(2) to transfer data.
6 *
7 */
8#include <stdio.h>
9#include <stdlib.h>
10#include <unistd.h>
11#include <sys/uio.h>
12#include <errno.h>
13
14#include "../fio.h"
15#include "../optgroup.h"
16#include "../lib/rand.h"
17
18/*
19 * Sync engine uses engine_data to store last offset
20 */
21#define LAST_POS(f) ((f)->engine_pos)
22
/*
 * Per-thread state shared by the sync engine family. The vectored
 * engines (vsync/pvsync/pvsync2) use it to batch contiguous io_us into
 * a single readv(2)/writev(2), and pvsync2 uses the rand state for the
 * hipri_percentage coin flip.
 */
struct syncio_data {
	struct iovec *iovecs;		/* one slot per queued io_u, iodepth total */
	struct io_u **io_us;		/* the io_u backing each iovec slot */
	unsigned int queued;		/* io_us in the current (uncommitted) batch */
	unsigned int events;		/* completions available to ->getevents() */
	unsigned long queued_bytes;	/* total payload bytes in the batch */

	/* end position of the last queued io_u, used to detect appends */
	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;

	/* used by pvsync2's probabilistic RWF_HIPRI selection */
	struct frand_state rand_state;
};
36
37#ifdef FIO_HAVE_PWRITEV2
/*
 * Job options for the pvsync2 engine, filled in by the option parser.
 */
struct psyncv2_options {
	void *pad;			/* NOTE(review): presumably keeps option offsets non-zero, per fio convention — confirm */
	unsigned int hipri;		/* set RWF_HIPRI on preadv2/pwritev2 */
	unsigned int hipri_percentage;	/* percentage of I/Os that get RWF_HIPRI */
	unsigned int uncached;		/* set RWF_UNCACHED for buffered I/O */
	unsigned int nowait;		/* set RWF_NOWAIT */
};
45
/*
 * Job-file options for pvsync2; values land in struct psyncv2_options
 * via the offsets below. The array is NULL-name terminated.
 */
static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "RWF_HIPRI",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct psyncv2_options, hipri),
		.help	= "Set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= "hipri_percentage",
		.lname	= "RWF_HIPRI_PERCENTAGE",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct psyncv2_options, hipri_percentage),
		.minval	= 0,
		.maxval	= 100,
		.def	= "100",
		.help	= "Probabilistically set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= "uncached",
		.lname	= "Uncached",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct psyncv2_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= "nowait",
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct psyncv2_options, nowait),
		.help	= "Set RWF_NOWAIT for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= NULL,
	},
};
90#endif
91
92static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
93{
94 struct fio_file *f = io_u->file;
95
96 if (!ddir_rw(io_u->ddir))
97 return 0;
98
99 if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
100 return 0;
101
102 if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
103 td_verror(td, errno, "lseek");
104 return 1;
105 }
106
107 return 0;
108}
109
110static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
111{
112 if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
113 LAST_POS(io_u->file) = io_u->offset + ret;
114
115 if (ret != (int) io_u->xfer_buflen) {
116 if (ret >= 0) {
117 io_u->resid = io_u->xfer_buflen - ret;
118 io_u->error = 0;
119 return FIO_Q_COMPLETED;
120 } else
121 io_u->error = errno;
122 }
123
124 if (io_u->error) {
125 io_u_log_error(td, io_u);
126 td_verror(td, io_u->error, "xfer");
127 }
128
129 return FIO_Q_COMPLETED;
130}
131
132#ifdef CONFIG_PWRITEV
133static enum fio_q_status fio_pvsyncio_queue(struct thread_data *td,
134 struct io_u *io_u)
135{
136 struct syncio_data *sd = td->io_ops_data;
137 struct iovec *iov = &sd->iovecs[0];
138 struct fio_file *f = io_u->file;
139 int ret;
140
141 fio_ro_check(td, io_u);
142
143 iov->iov_base = io_u->xfer_buf;
144 iov->iov_len = io_u->xfer_buflen;
145
146 if (io_u->ddir == DDIR_READ)
147 ret = preadv(f->fd, iov, 1, io_u->offset);
148 else if (io_u->ddir == DDIR_WRITE)
149 ret = pwritev(f->fd, iov, 1, io_u->offset);
150 else if (io_u->ddir == DDIR_TRIM) {
151 do_io_u_trim(td, io_u);
152 return FIO_Q_COMPLETED;
153 } else
154 ret = do_io_u_sync(td, io_u);
155
156 return fio_io_end(td, io_u, ret);
157}
158#endif
159
160#ifdef FIO_HAVE_PWRITEV2
161static enum fio_q_status fio_pvsyncio2_queue(struct thread_data *td,
162 struct io_u *io_u)
163{
164 struct syncio_data *sd = td->io_ops_data;
165 struct psyncv2_options *o = td->eo;
166 struct iovec *iov = &sd->iovecs[0];
167 struct fio_file *f = io_u->file;
168 int ret, flags = 0;
169
170 fio_ro_check(td, io_u);
171
172 if (o->hipri &&
173 (rand_between(&sd->rand_state, 1, 100) <= o->hipri_percentage))
174 flags |= RWF_HIPRI;
175 if (!td->o.odirect && o->uncached)
176 flags |= RWF_UNCACHED;
177 if (o->nowait)
178 flags |= RWF_NOWAIT;
179
180 iov->iov_base = io_u->xfer_buf;
181 iov->iov_len = io_u->xfer_buflen;
182
183 if (io_u->ddir == DDIR_READ)
184 ret = preadv2(f->fd, iov, 1, io_u->offset, flags);
185 else if (io_u->ddir == DDIR_WRITE)
186 ret = pwritev2(f->fd, iov, 1, io_u->offset, flags);
187 else if (io_u->ddir == DDIR_TRIM) {
188 do_io_u_trim(td, io_u);
189 return FIO_Q_COMPLETED;
190 } else
191 ret = do_io_u_sync(td, io_u);
192
193 return fio_io_end(td, io_u, ret);
194}
195#endif
196
197static enum fio_q_status fio_psyncio_queue(struct thread_data *td,
198 struct io_u *io_u)
199{
200 struct fio_file *f = io_u->file;
201 int ret;
202
203 fio_ro_check(td, io_u);
204
205 if (io_u->ddir == DDIR_READ)
206 ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
207 else if (io_u->ddir == DDIR_WRITE)
208 ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
209 else if (io_u->ddir == DDIR_TRIM) {
210 do_io_u_trim(td, io_u);
211 return FIO_Q_COMPLETED;
212 } else
213 ret = do_io_u_sync(td, io_u);
214
215 return fio_io_end(td, io_u, ret);
216}
217
218static enum fio_q_status fio_syncio_queue(struct thread_data *td,
219 struct io_u *io_u)
220{
221 struct fio_file *f = io_u->file;
222 int ret;
223
224 fio_ro_check(td, io_u);
225
226 if (io_u->ddir == DDIR_READ)
227 ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
228 else if (io_u->ddir == DDIR_WRITE)
229 ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
230 else if (io_u->ddir == DDIR_TRIM) {
231 do_io_u_trim(td, io_u);
232 return FIO_Q_COMPLETED;
233 } else
234 ret = do_io_u_sync(td, io_u);
235
236 return fio_io_end(td, io_u, ret);
237}
238
239static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
240 unsigned int max,
241 const struct timespec fio_unused *t)
242{
243 struct syncio_data *sd = td->io_ops_data;
244 int ret;
245
246 if (min) {
247 ret = sd->events;
248 sd->events = 0;
249 } else
250 ret = 0;
251
252 dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
253 return ret;
254}
255
256static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
257{
258 struct syncio_data *sd = td->io_ops_data;
259
260 return sd->io_us[event];
261}
262
263static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
264{
265 struct syncio_data *sd = td->io_ops_data;
266
267 if (ddir_sync(io_u->ddir))
268 return 0;
269
270 if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
271 io_u->ddir == sd->last_ddir)
272 return 1;
273
274 return 0;
275}
276
277static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
278 int idx)
279{
280 sd->io_us[idx] = io_u;
281 sd->iovecs[idx].iov_base = io_u->xfer_buf;
282 sd->iovecs[idx].iov_len = io_u->xfer_buflen;
283 sd->last_offset = io_u->offset + io_u->xfer_buflen;
284 sd->last_file = io_u->file;
285 sd->last_ddir = io_u->ddir;
286 sd->queued_bytes += io_u->xfer_buflen;
287 sd->queued++;
288}
289
/*
 * Queue an io_u for the vectored engines. Contiguous same-file,
 * same-direction requests are collected into the iovec array and
 * submitted together from ->commit(); anything that breaks the pattern
 * forces the pending batch out first by returning FIO_Q_BUSY.
 */
static enum fio_q_status fio_vsyncio_queue(struct thread_data *td,
					   struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		/* sync requests never batch; complete them inline */
		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		/* nothing pending: start a fresh batch with this io_u */
		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		/* batch full: force a commit before accepting more */
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}
327
/*
 * Check that we transferred all bytes, or saw an error, etc
 *
 * Distribute the single readv(2)/writev(2) result across the batch's
 * io_us: on syscall failure every io_u gets the saved errno; on a short
 * transfer the bytes are credited in queue order and the shortfall is
 * recorded as residual. Returns 0 on (possibly short) success, -errno
 * on failure.
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops_data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	/* save errno up front; calls below may overwrite it */
	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			/* credit this io_u with as much as fits in its buffer */
			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}
370
371static int fio_vsyncio_commit(struct thread_data *td)
372{
373 struct syncio_data *sd = td->io_ops_data;
374 struct fio_file *f;
375 ssize_t ret;
376
377 if (!sd->queued)
378 return 0;
379
380 io_u_mark_submit(td, sd->queued);
381 f = sd->last_file;
382
383 if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
384 int err = -errno;
385
386 td_verror(td, errno, "lseek");
387 return err;
388 }
389
390 if (sd->last_ddir == DDIR_READ)
391 ret = readv(f->fd, sd->iovecs, sd->queued);
392 else
393 ret = writev(f->fd, sd->iovecs, sd->queued);
394
395 dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
396 sd->events = sd->queued;
397 sd->queued = 0;
398 return fio_vsyncio_end(td, ret);
399}
400
401static int fio_vsyncio_init(struct thread_data *td)
402{
403 struct syncio_data *sd;
404
405 sd = malloc(sizeof(*sd));
406 memset(sd, 0, sizeof(*sd));
407 sd->last_offset = -1ULL;
408 sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
409 sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
410 init_rand(&sd->rand_state, 0);
411
412 td->io_ops_data = sd;
413 return 0;
414}
415
416static void fio_vsyncio_cleanup(struct thread_data *td)
417{
418 struct syncio_data *sd = td->io_ops_data;
419
420 if (sd) {
421 free(sd->iovecs);
422 free(sd->io_us);
423 free(sd);
424 }
425}
426
/*
 * "sync": read(2)/write(2), with the file position set by ->prep().
 */
static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
437
/*
 * "psync": positional pread(2)/pwrite(2); no ->prep() needed.
 */
static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
447
/*
 * "vsync": batches contiguous io_us and submits them with a single
 * readv(2)/writev(2) from ->commit().
 */
static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
462
463#ifdef CONFIG_PWRITEV
/*
 * "pvsync": one positional preadv(2)/pwritev(2) per io_u.
 */
static struct ioengine_ops ioengine_pvrw = {
	.name		= "pvsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
475#endif
476
477#ifdef FIO_HAVE_PWRITEV2
/*
 * "pvsync2": preadv2(2)/pwritev2(2) with per-I/O RWF_* flags controlled
 * by the engine options above.
 */
static struct ioengine_ops ioengine_pvrw2 = {
	.name		= "pvsync2",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio2_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
	.options	= options,
	.option_struct_size = sizeof(struct psyncv2_options),
};
491#endif
492
/*
 * Constructor (fio_init): register every engine in this file; the
 * vectored variants only exist when compile-time support was detected.
 */
static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	register_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	register_ioengine(&ioengine_pvrw2);
#endif
}
505
/*
 * Destructor (fio_exit): unregister the same set of engines that
 * fio_syncio_register() installed.
 */
static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	unregister_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	unregister_ioengine(&ioengine_pvrw2);
#endif
}