/*
 * sync/psync/vsync engine
 *
 * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
 * data, an IO engine that does regular pread(2)/pwrite(2) to transfer
 * data, and an IO engine that batches contiguous requests into a single
 * readv(2)/writev(2).
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/uio.h>

#include "../fio.h"
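/*
 * Example usage (a sketch; the job parameters below are illustrative,
 * not part of this file). An engine is selected by name, matching the
 * .name fields registered at the bottom:
 *
 *	fio --name=seqread --ioengine=psync --rw=read --bs=4k --size=64m
 *
 * Substituting --ioengine=sync or --ioengine=vsync exercises the other
 * two engines implemented here.
 */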
/*
 * Per-thread state for the vsync engine: the staged iovec array, the
 * io_us backing it, and enough about the last request to decide whether
 * the next one extends the batch.
 */
struct syncio_data {
	struct iovec *iovecs;
	struct io_u **io_us;

	unsigned int queued;
	unsigned long queued_bytes;

	unsigned long long last_offset;
	struct fio_file *last_file;
	enum fio_ddir last_ddir;
};
static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (io_u->ddir == DDIR_SYNC)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}
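/*
 * Common completion handling for the sync engines. A short transfer is
 * not treated as an error here: for example, a 4096 byte read that
 * returns 512 completes with io_u->resid = 3584 and error clear,
 * leaving the residual for the fio core to account for.
 */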
static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error)
		td_verror(td, io_u->error, "xfer");

	return FIO_Q_COMPLETED;
}
static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
	else
		ret = fsync(f->fd);

	return fio_io_end(td, io_u, ret);
}
static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else
		ret = fsync(f->fd);

	return fio_io_end(td, io_u, ret);
}
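/*
 * The two engines above differ only in how the file is positioned: the
 * sync engine relies on fio_syncio_prep() issuing lseek(2) before each
 * read(2)/write(2), while the psync engine passes the offset directly,
 * so the two sequences below are equivalent (sketch):
 *
 *	lseek(fd, off, SEEK_SET);	pread(fd, buf, len, off);
 *	read(fd, buf, len);
 *
 * psync saves one syscall per IO, which is why ioengine_prw below needs
 * no ->prep() hook.
 */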
static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
				 unsigned int max,
				 struct timespec fio_unused *t)
{
	struct syncio_data *sd = td->io_ops->data;
	int ret;

	/*
	 * The batch completed synchronously in commit, so every queued
	 * io_u is reapable as soon as the caller asks for events.
	 */
	if (min) {
		ret = sd->queued;
		sd->queued = 0;
	} else
		ret = 0;

	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
	return ret;
}
static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
{
	struct syncio_data *sd = td->io_ops->data;

	return sd->io_us[event];
}
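/*
 * An io_u may join the current batch only if it continues it exactly:
 * same file, same direction, and an offset equal to where the previous
 * request ended. E.g. after a 4k write at offset 0, a 4k write at
 * offset 4096 to the same file appends; anything else starts a new
 * batch.
 */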
static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	if (io_u->ddir == DDIR_SYNC)
		return 0;

	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
	    io_u->ddir == sd->last_ddir)
		return 1;

	return 0;
}
static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
				int index)
{
	sd->io_us[index] = io_u;
	sd->iovecs[index].iov_base = io_u->xfer_buf;
	sd->iovecs[index].iov_len = io_u->xfer_buflen;
	sd->last_offset = io_u->offset + io_u->xfer_buflen;
	sd->last_file = io_u->file;
	sd->last_ddir = io_u->ddir;
	sd->queued_bytes += io_u->xfer_buflen;
	sd->queued++;
}
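/*
 * Queue path for the vsync engine. Requests are only staged here; the
 * actual readv(2)/writev(2) happens in fio_vsyncio_commit(). A typical
 * sequence: queue() returns FIO_Q_QUEUED while requests stay contiguous,
 * then FIO_Q_BUSY once the batch must be flushed, at which point fio
 * calls commit() and reaps the completions via getevents()/event().
 */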
static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;

	fio_ro_check(td, io_u);

	if (!fio_vsyncio_append(td, io_u)) {
		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
		/*
		 * If we can't append and have stuff queued, tell fio to
		 * commit those first and then retry this io
		 */
		if (sd->queued)
			return FIO_Q_BUSY;

		if (io_u->ddir == DDIR_SYNC) {
			int ret = fsync(io_u->file->fd);

			return fio_io_end(td, io_u, ret);
		}

		sd->queued = 0;
		sd->queued_bytes = 0;
		fio_vsyncio_set_iov(sd, io_u, 0);
	} else {
		if (sd->queued == td->o.iodepth) {
			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
			return FIO_Q_BUSY;
		}

		dprint(FD_IO, "vsyncio_queue: append\n");
		fio_vsyncio_set_iov(sd, io_u, sd->queued);
	}

	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
	return FIO_Q_QUEUED;
}
/*
 * Check that we transferred all bytes, or saw an error, etc
 */
static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
{
	struct syncio_data *sd = td->io_ops->data;
	struct io_u *io_u;
	unsigned int i;
	int err;

	/*
	 * transferred everything, perfect
	 */
	if (bytes == sd->queued_bytes)
		return 0;

	err = errno;
	for (i = 0; i < sd->queued; i++) {
		io_u = sd->io_us[i];

		if (bytes == -1) {
			io_u->error = err;
		} else {
			/*
			 * Walk the batch front to back, crediting each
			 * io_u with as much of the short transfer as it
			 * covers.
			 */
			unsigned int this_io;

			this_io = bytes;
			if (this_io > io_u->xfer_buflen)
				this_io = io_u->xfer_buflen;

			io_u->resid = io_u->xfer_buflen - this_io;
			io_u->error = 0;
			bytes -= this_io;
		}
	}

	if (bytes == -1) {
		td_verror(td, err, "xfer vsync");
		return -err;
	}

	return 0;
}
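/*
 * Submit the staged batch as one vectored syscall: e.g. eight queued
 * 4k writes become a single writev(2) of 32k. One lseek(2) to the
 * first io_u's offset is enough, because the batch is contiguous by
 * construction.
 */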
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	return fio_vsyncio_end(td, ret);
}
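/*
 * iovecs and io_us are sized for the worst case of one entry per
 * queued io_u, bounded by the iodepth check in fio_vsyncio_queue().
 * last_offset starts at -1ULL so the first request can never appear
 * to continue a previous batch.
 */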
static int fio_vsyncio_init(struct thread_data *td)
{
	struct syncio_data *sd;

	sd = malloc(sizeof(*sd));
	memset(sd, 0, sizeof(*sd));
	sd->last_offset = -1ULL;
	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));

	td->io_ops->data = sd;
	return 0;
}
static void fio_vsyncio_cleanup(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;

	free(sd->iovecs);
	free(sd->io_us);
	free(sd);
	td->io_ops->data = NULL;
}
static struct ioengine_ops ioengine_rw = {
	.name		= "sync",
	.version	= FIO_IOOPS_VERSION,
	.prep		= fio_syncio_prep,
	.queue		= fio_syncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO,
};
static struct ioengine_ops ioengine_prw = {
	.name		= "psync",
	.version	= FIO_IOOPS_VERSION,
	.queue		= fio_psyncio_queue,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO,
};
static struct ioengine_ops ioengine_vrw = {
	.name		= "vsync",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_vsyncio_init,
	.cleanup	= fio_vsyncio_cleanup,
	.queue		= fio_vsyncio_queue,
	.commit		= fio_vsyncio_commit,
	.event		= fio_vsyncio_event,
	.getevents	= fio_vsyncio_getevents,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.flags		= FIO_SYNCIO,
};
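/*
 * fio_init/fio_exit mark these as constructor/destructor functions, so
 * the engines register themselves when the binary (or plugin) is loaded
 * and unregister on unload; no explicit setup call is needed.
 */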
static void fio_init fio_syncio_register(void)
{
	register_ioengine(&ioengine_rw);
	register_ioengine(&ioengine_prw);
	register_ioengine(&ioengine_vrw);
}
static void fio_exit fio_syncio_unregister(void)
{
	unregister_ioengine(&ioengine_rw);
	unregister_ioengine(&ioengine_prw);
	unregister_ioengine(&ioengine_vrw);
}