/*
 * sync/psync/vsync engines
 *
 * IO engines that transfer data with regular read(2)/write(2) after an
 * lseek(2), with pread(2)/pwrite(2), or with batched readv(2)/writev(2).
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/uio.h>
#include <errno.h>

#include "../fio.h"
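/*
 * Per-thread state for the vsync engine: the iovec array and matching io_u
 * pointers collected for the next readv(2)/writev(2), plus the offset, file
 * and direction of the last queued io_u so the next one can be checked for
 * contiguity before it is appended to the batch.
 */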
struct syncio_data {
        struct iovec *iovecs;
        struct io_u **io_us;
        unsigned int queued;
        unsigned long queued_bytes;

        unsigned long long last_offset;
        struct fio_file *last_file;
        enum fio_ddir last_ddir;
};
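/*
 * sync engine prep: position the file with lseek(2) before the upcoming
 * read(2)/write(2). Sync requests carry no offset and need no seek.
 */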
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;

        if (io_u->ddir == DDIR_SYNC)
                return 0;

        if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
                td_verror(td, errno, "lseek");
                return 1;
        }

        return 0;
}
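/*
 * Common completion handling for the sync and psync engines: a short
 * transfer is recorded as a residual, a negative return becomes an io_u
 * error.
 */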
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
        if (ret != (int) io_u->xfer_buflen) {
                if (ret >= 0) {
                        io_u->resid = io_u->xfer_buflen - ret;
                        io_u->error = 0;
                        return FIO_Q_COMPLETED;
                } else
                        io_u->error = errno;
        }

        if (io_u->error)
                td_verror(td, io_u->error, "xfer");

        return FIO_Q_COMPLETED;
}
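/*
 * psync engine: pread(2)/pwrite(2) take the offset directly, so the io_u is
 * issued in one call with no prep/lseek step; DDIR_SYNC maps to fsync(2).
 */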
static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else if (io_u->ddir == DDIR_WRITE)
                ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
        else
                ret = fsync(f->fd);

        return fio_io_end(td, io_u, ret);
}
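/*
 * sync engine: the file position was set in ->prep(), so a plain read(2) or
 * write(2) moves the buffer; DDIR_SYNC again maps to fsync(2).
 */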
static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct fio_file *f = io_u->file;
        int ret;

        fio_ro_check(td, io_u);

        if (io_u->ddir == DDIR_READ)
                ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
        else if (io_u->ddir == DDIR_WRITE)
                ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
        else
                ret = fsync(f->fd);

        return fio_io_end(td, io_u, ret);
}
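/*
 * Report how many io_us the last commit finished. This sketch assumes the
 * size of the committed batch is still sitting in sd->queued and that
 * getevents consumes it; min and max follow the usual fio getevents
 * contract.
 */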
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
                                 unsigned int max,
                                 struct timespec fio_unused *t)
{
        struct syncio_data *sd = td->io_ops->data;
        int ret = 0;

        if (min) {
                /* assumed: hand back the batch from the last commit */
                ret = sd->queued;
                sd->queued = 0;
        }

        dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
        return ret;
}
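/*
 * Return the nth completed io_u from the last committed batch.
 */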
static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
        struct syncio_data *sd = td->io_ops->data;

        return sd->io_us[event];
}
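/*
 * Can io_u be appended to the current batch? It must not be a sync request,
 * and it must continue the previous io_u in the same file and direction at
 * exactly the next offset.
 */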
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
        struct syncio_data *sd = td->io_ops->data;

        if (io_u->ddir == DDIR_SYNC)
                return 0;

        if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
            io_u->ddir == sd->last_ddir)
                return 1;

        return 0;
}
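/*
 * Store io_u in the given slot of the iovec/io_u arrays and update the
 * "last" bookkeeping used by fio_vsyncio_append().
 */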
static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
                                int index)
{
        sd->io_us[index] = io_u;
        sd->iovecs[index].iov_base = io_u->xfer_buf;
        sd->iovecs[index].iov_len = io_u->xfer_buflen;
        sd->last_offset = io_u->offset + io_u->xfer_buflen;
        sd->last_file = io_u->file;
        sd->last_ddir = io_u->ddir;
        sd->queued_bytes += io_u->xfer_buflen;
        sd->queued++;
}
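/*
 * vsync queue path: contiguous io_us are only collected here and submitted
 * later from ->commit(). A non-contiguous io_u or a full batch returns
 * FIO_Q_BUSY so fio commits what is queued and then retries this io_u.
 */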
static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
        struct syncio_data *sd = td->io_ops->data;

        fio_ro_check(td, io_u);

        if (!fio_vsyncio_append(td, io_u)) {
                dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
                /*
                 * If we can't append and have stuff queued, tell fio to
                 * commit those first and then retry this io
                 */
                if (sd->queued)
                        return FIO_Q_BUSY;

                if (io_u->ddir == DDIR_SYNC) {
                        int ret = fsync(io_u->file->fd);

                        return fio_io_end(td, io_u, ret);
                }

                sd->queued = 0;
                sd->queued_bytes = 0;
                fio_vsyncio_set_iov(sd, io_u, 0);
        } else {
                if (sd->queued == td->o.iodepth) {
                        dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
                        return FIO_Q_BUSY;
                }

                dprint(FD_IO, "vsyncio_queue: append\n");
                fio_vsyncio_set_iov(sd, io_u, sd->queued);
        }

        dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
        return FIO_Q_QUEUED;
}
/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
        struct syncio_data *sd = td->io_ops->data;
        struct io_u *io_u;
        unsigned int i;
        int err;

        /*
         * transferred everything, perfect
         */
        if (bytes == sd->queued_bytes)
                return 0;

        err = errno;
        for (i = 0; i < sd->queued; i++) {
                io_u = sd->io_us[i];
                if (bytes == -1) {
                        io_u->error = err;
                } else {
                        unsigned int this_io = bytes;

                        if (this_io > io_u->xfer_buflen)
                                this_io = io_u->xfer_buflen;
                        io_u->resid = io_u->xfer_buflen - this_io;
                        io_u->error = 0;
                        bytes -= this_io;
                }
        }

        if (bytes == -1) {
                td_verror(td, err, "xfer vsync");
                return -err;
        }

        return 0;
}
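/*
 * Submit the current batch: seek to the offset of the first queued io_u and
 * issue one readv(2) or writev(2) covering all collected iovecs, then let
 * fio_vsyncio_end() spread the result over the individual io_us.
 */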
static int fio_vsyncio_commit(struct thread_data *td)
{
        struct syncio_data *sd = td->io_ops->data;
        struct fio_file *f;
        ssize_t ret;

        if (!sd->queued)
                return 0;

        io_u_mark_submit(td, sd->queued);
        f = sd->last_file;

        if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
                int err = -errno;

                td_verror(td, errno, "lseek");
                return err;
        }

        if (sd->last_ddir == DDIR_READ)
                ret = readv(f->fd, sd->iovecs, sd->queued);
        else
                ret = writev(f->fd, sd->iovecs, sd->queued);

        dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
        return fio_vsyncio_end(td, ret);
}
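/*
 * Allocate the per-thread batch state; iodepth bounds how many iovecs a
 * single readv(2)/writev(2) may carry. last_offset starts at -1ULL so the
 * first io_u can never look contiguous with a previous one.
 */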
static int fio_vsyncio_init(struct thread_data *td)
{
        struct syncio_data *sd;

        sd = malloc(sizeof(*sd));
        memset(sd, 0, sizeof(*sd));
        sd->last_offset = -1ULL;
        sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
        sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

        td->io_ops->data = sd;
        return 0;
}
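/*
 * Free the batch state allocated in fio_vsyncio_init().
 */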
static void fio_vsyncio_cleanup(struct thread_data *td)
{
        struct syncio_data *sd = td->io_ops->data;

        free(sd->iovecs);
        free(sd->io_us);
        free(sd);

        td->io_ops->data = NULL;
}
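/*
 * Engine definitions registered below: "sync" does read/write after the
 * lseek in ->prep(), "psync" does pread/pwrite, and "vsync" batches
 * contiguous blocks into one readv/writev.
 */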
static struct ioengine_ops ioengine_rw = {
        .name           = "sync",
        .version        = FIO_IOOPS_VERSION,
        .prep           = fio_syncio_prep,
        .queue          = fio_syncio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};
static struct ioengine_ops ioengine_prw = {
        .name           = "psync",
        .version        = FIO_IOOPS_VERSION,
        .queue          = fio_psyncio_queue,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};
static struct ioengine_ops ioengine_vrw = {
        .name           = "vsync",
        .version        = FIO_IOOPS_VERSION,
        .init           = fio_vsyncio_init,
        .cleanup        = fio_vsyncio_cleanup,
        .queue          = fio_vsyncio_queue,
        .commit         = fio_vsyncio_commit,
        .event          = fio_vsyncio_event,
        .getevents      = fio_vsyncio_getevents,
        .open_file      = generic_open_file,
        .close_file     = generic_close_file,
        .flags          = FIO_SYNCIO,
};
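/*
 * fio_init/fio_exit mark these as constructor/destructor hooks, so the
 * three engines register themselves on load and unregister on exit.
 */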
static void fio_init fio_syncio_register(void)
{
        register_ioengine(&ioengine_rw);
        register_ioengine(&ioengine_prw);
        register_ioengine(&ioengine_vrw);
}
static void fio_exit fio_syncio_unregister(void)
{
        unregister_ioengine(&ioengine_rw);
        unregister_ioengine(&ioengine_prw);
        unregister_ioengine(&ioengine_vrw);
}
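/*
 * Hypothetical example job (not from this file) showing how these engines
 * are selected; with vsync, iodepth bounds how many contiguous blocks can
 * be merged into a single readv(2)/writev(2). The job name and filename are
 * made up for illustration:
 *
 *      [vsync-seq-write]
 *      ioengine=vsync
 *      rw=write
 *      bs=4k
 *      iodepth=16
 *      size=64m
 *      filename=/tmp/vsync.test
 */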