/*
 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
 * data and IO engine that does regular pread(2)/pwrite(2) to transfer data.
 */
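/*
 * Example job file selecting one of the engines registered below
 * (illustrative only, not part of this source):
 *
 *	[seq-write]
 *	ioengine=psync
 *	rw=write
 *	bs=4k
 *	size=1g
 */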
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>

#include "../fio.h"
#include "../optgroup.h"
#include "../lib/rand.h"
/*
 * Sync engine uses engine_data to store last offset
 */
#define LAST_POS(f)	((f)->engine_pos)
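/*
 * Per-thread state for the vectored variants (vsync/pvsync/pvsync2):
 * the iovec array and io_u list for the currently batched requests,
 * plus the bookkeeping used to decide whether the next io_u can be
 * appended to the batch.
 */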
struct syncio_data {
	struct iovec *iovecs;
	struct io_u **io_us;
	unsigned int queued;
	unsigned int events;
	unsigned long queued_bytes;

	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;

	struct frand_state rand_state;
};
#ifdef FIO_HAVE_PWRITEV2
struct psyncv2_options {
	unsigned int hipri;
	unsigned int hipri_percentage;
};
static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "RWF_HIPRI",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct psyncv2_options, hipri),
		.help	= "Set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= "hipri_percentage",
		.lname	= "RWF_HIPRI_PERCENTAGE",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct psyncv2_options, hipri_percentage),
		.def	= "100",
		.help	= "Probabilistically set RWF_HIPRI for pwritev2/preadv2",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_INVALID,
	},
	{
		.name	= NULL,
	},
};
#endif
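/*
 * Position the file descriptor for the upcoming read(2)/write(2).
 * Seeking is skipped for non-rw directions and when the file is
 * already at the requested offset.
 */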
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}
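/*
 * Common completion handling: remember the new file position, turn a
 * short transfer into a residual count, and map an error into
 * io_u->error before reporting the io_u as completed.
 */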
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}
#ifdef CONFIG_PWRITEV
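/*
 * pvsync engine: one preadv(2)/pwritev(2) per io_u, using a single
 * iovec and the io_u offset directly (no lseek needed).
 */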
static enum fio_q_status fio_pvsyncio_queue(struct thread_data *td,
					    struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif
#ifdef FIO_HAVE_PWRITEV2
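/*
 * pvsync2 engine: preadv2(2)/pwritev2(2) with per-IO flags. RWF_HIPRI
 * is set when the "hipri" option is given, optionally only for a
 * percentage of IOs as controlled by "hipri_percentage".
 */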
static enum fio_q_status fio_pvsyncio2_queue(struct thread_data *td,
					     struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;
	struct psyncv2_options *o = td->eo;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret, flags = 0;

	fio_ro_check(td, io_u);

	if (o->hipri &&
	    (rand_between(&sd->rand_state, 1, 100) <= o->hipri_percentage))
		flags |= RWF_HIPRI;

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv2(f->fd, iov, 1, io_u->offset, flags);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev2(f->fd, iov, 1, io_u->offset, flags);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
#endif
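/*
 * psync engine: plain pread(2)/pwrite(2) at the io_u offset.
 */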
static enum fio_q_status fio_psyncio_queue(struct thread_data *td,
					   struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
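/*
 * sync engine: plain read(2)/write(2); relies on fio_syncio_prep()
 * having positioned the file with lseek(2).
 */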
static enum fio_q_status fio_syncio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
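/*
 * Events are produced synchronously by commit, so getevents simply
 * reports (and clears) whatever the last commit completed.
 */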
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 const struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops_data;
	int ret;

	if (min) {
		ret = sd->events;
		sd->events = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}
static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops_data;

	return sd->io_us[event];
}
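/*
 * An io_u can be appended to the current batch only if it continues
 * sequentially from the last queued request, on the same file and in
 * the same direction; syncs are never batched.
 */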
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	if (ddir_sync(io_u->ddir))
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}
static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int idx)
{
	sd->io_us[idx] = io_u;
	sd->iovecs[idx].iov_base = io_u->xfer_buf;
	sd->iovecs[idx].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}
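/*
 * Queue path for the vectored engines: contiguous io_us are collected
 * into the iovec array until the batch must be committed (direction,
 * file or offset changes, or the configured iodepth is reached).
 */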
static enum fio_q_status fio_vsyncio_queue(struct thread_data *td,
					   struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops_data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;
		if (ddir_sync(io_u->ddir)) {
			int ret = do_io_u_sync(td, io_u);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}
/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops_data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}
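/*
 * Submit the whole batch with a single readv(2)/writev(2) after one
 * lseek(2) to the offset of the first queued io_u.
 */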
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops_data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	sd->events = sd->queued;
	sd->queued = 0;
	return fio_vsyncio_end(td, ret);
}
static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
	init_rand(&sd->rand_state, 0);

	td->io_ops_data = sd;
	return 0;
}
static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops_data;

	if (sd) {
		free(sd->iovecs);
		free(sd->io_us);
		free(sd);
	}
}
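/*
 * Engine registration tables. sync, psync and vsync are always built;
 * pvsync and pvsync2 additionally require preadv/pwritev and
 * preadv2/pwritev2 support, respectively.
 */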
static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
#ifdef CONFIG_PWRITEV
static struct ioengine_ops ioengine_pvrw = {
	.name		= "pvsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
};
#endif
#ifdef FIO_HAVE_PWRITEV2
static struct ioengine_ops ioengine_pvrw2 = {
	.name		= "pvsync2",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_pvsyncio2_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO,
	.options	= options,
	.option_struct_size	= sizeof(struct psyncv2_options),
};
#endif
static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	register_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	register_ioengine(&ioengine_pvrw2);
#endif
}
static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
#ifdef CONFIG_PWRITEV
	unregister_ioengine(&ioengine_pvrw);
#endif
#ifdef FIO_HAVE_PWRITEV2
	unregister_ioengine(&ioengine_pvrw2);
#endif
}