/*
 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
 * data and IO engine that does regular pread(2)/pwrite(2) to transfer data.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>

#include "../fio.h"
#include "../optgroup.h"
#include "../lib/rand.h"
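/*
 * A minimal job sketch selecting one of the engines registered at the
 * bottom of this file (the job name and parameter values below are
 * purely illustrative, not defaults taken from this code):
 *
 *	[seq-write]
 *	ioengine=psync
 *	rw=write
 *	bs=4k
 *	size=1g
 *	filename=/tmp/fio.test
 */
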
/*
 * Sync engine uses engine_data to store last offset
 */
#define LAST_POS(f)	((f)->engine_pos)

struct syncio_data {
	struct iovec *iovecs;
	struct io_u **io_us;
	unsigned int queued;
	unsigned int events;
	unsigned long queued_bytes;

	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;

	struct frand_state rand_state;
};

#ifdef FIO_HAVE_PWRITEV2
struct psyncv2_options {
	void *pad;
	unsigned int hipri;
	unsigned int hipri_percentage;
};

static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "RWF_HIPRI",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct psyncv2_options, hipri),
		.help	= "Set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= "hipri_percentage",
		.lname	= "RWF_HIPRI_PERCENTAGE",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct psyncv2_options, hipri_percentage),
		.minval	= 0,
		.maxval	= 100,
		.def	= "100",
		.help	= "Probabilistically set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= NULL,
	},
};
#endif

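/*
 * Position the file descriptor at the io_u offset for the plain
 * read(2)/write(2) engine, unless the previous transfer already left
 * it there (tracked via LAST_POS()).
 */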
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

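/*
 * Shared completion handling: advance the cached file position on
 * success, turn a short transfer into a residual byte count, map a
 * failed syscall to an io_u error, and report the io_u as completed.
 */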
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}

#ifdef CONFIG_PWRITEV
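/*
 * pvsync engine: a single preadv(2)/pwritev(2) of one iovec at the
 * io_u offset; trim and sync requests go through the generic helpers.
 */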
static int fio_pvsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif

#ifdef FIO_HAVE_PWRITEV2
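/*
 * pvsync2 engine: like pvsync, but preadv2(2)/pwritev2(2) accept
 * per-IO flags, so RWF_HIPRI can be set for a configurable percentage
 * of the IOs.
 */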
static int fio_pvsyncio2_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;
	struct psyncv2_options *o = td->eo;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret, flags = 0;

	fio_ro_check(td, io_u);

	if (o->hipri &&
	    (rand32_between(&sd->rand_state, 1, 100) <= o->hipri_percentage))
		flags |= RWF_HIPRI;

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv2(f->fd, iov, 1, io_u->offset, flags);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev2(f->fd, iov, 1, io_u->offset, flags);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif

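/*
 * psync engine: positional pread(2)/pwrite(2), so no prep/lseek step
 * is needed.
 */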
static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

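/*
 * sync engine: plain read(2)/write(2), relying on fio_syncio_prep()
 * having positioned the file descriptor first.
 */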
static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}

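/*
 * vsync engine: batches sequentially contiguous io_us of the same file
 * and data direction into an iovec array.  ->queue() only stages an
 * io_u (or returns FIO_Q_BUSY to force a commit first), ->commit()
 * issues one readv(2)/writev(2) for the whole batch, and
 * ->getevents()/->event() hand the completed io_us back to fio.
 */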
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 const struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops_data;
	int ret;

	if (min) {
		ret = sd->events;
		sd->events = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}

static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops_data;

	return sd->io_us[event];
}

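/*
 * An io_u may join the current batch only if it is not a sync and
 * directly continues the last staged io_u: same file, same direction,
 * and an offset equal to the previous end offset.
 */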
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	if (ddir_sync(io_u->ddir))
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}

static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int idx)
{
	sd->io_us[idx] = io_u;
	sd->iovecs[idx].iov_base = io_u->xfer_buf;
	sd->iovecs[idx].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}

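/*
 * Stage an io_u for the next commit.  Returns FIO_Q_QUEUED when staged,
 * FIO_Q_BUSY when the current batch must be committed first (or the
 * configured iodepth has been reached), and FIO_Q_COMPLETED for syncs
 * that are executed inline.
 */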
static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;

		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}

/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops_data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}

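/*
 * Submit the staged batch: seek to the first queued offset, issue one
 * readv(2)/writev(2) over the collected iovecs, and let
 * fio_vsyncio_end() distribute short transfers or errors across the
 * batched io_us.
 */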
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops_data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	sd->events = sd->queued;
	sd->queued = 0;
	return fio_vsyncio_end(td, ret);
}

static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
	init_rand(&sd->rand_state, 0);

	td->io_ops_data = sd;
	return 0;
}

static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops_data;

	if (sd) {
		free(sd->iovecs);
		free(sd->io_us);
		free(sd);
	}
}

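/*
 * Engine descriptors.  sync, psync and vsync are always available;
 * pvsync and pvsync2 are only built when preadv/pwritev and
 * preadv2/pwritev2 support is detected.  All of them are synchronous
 * (FIO_SYNCIO) engines.
 */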
static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};

#ifdef CONFIG_PWRITEV
static struct ioengine_ops ioengine_pvrw = {
	.name		= "pvsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
#endif

#ifdef FIO_HAVE_PWRITEV2
static struct ioengine_ops ioengine_pvrw2 = {
	.name		= "pvsync2",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio2_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
	.options	= options,
	.option_struct_size	= sizeof(struct psyncv2_options),
};
#endif

static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	register_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	register_ioengine(&ioengine_pvrw2);
#endif
}

static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	unregister_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	unregister_ioengine(&ioengine_pvrw2);
#endif
}