X-Git-Url: https://git.kernel.dk/?p=fio.git;a=blobdiff_plain;f=engines%2Fsync.c;h=7c1cca63c700c4f318d1dd2003cc0fca5d52653f;hp=c7ddd4c52956125bd408e4c6f2a0d86f95908041;hb=a1c58075279454a91ec43366846b93e8dcf9753c;hpb=f8fe35e8c9e88dd681ea151251d75f6116a958b4

diff --git a/engines/sync.c b/engines/sync.c
index c7ddd4c5..7c1cca63 100644
--- a/engines/sync.c
+++ b/engines/sync.c
@@ -1,123 +1,331 @@
 /*
- * regular read/write sync io engine
+ * sync/psync engine
+ *
+ * IO engine that does regular read(2)/write(2) with lseek(2) to transfer
+ * data and IO engine that does regular pread(2)/pwrite(2) to transfer data.
  *
  */
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <sys/uio.h>
 #include <errno.h>
 #include <assert.h>
 
 #include "../fio.h"
-#include "../os.h"
 
 struct syncio_data {
-	struct io_u *last_io_u;
+	struct iovec *iovecs;
+	struct io_u **io_us;
+	unsigned int queued;
+	unsigned int events;
+	unsigned long queued_bytes;
+
+	unsigned long long last_offset;
+	struct fio_file *last_file;
+	enum fio_ddir last_ddir;
 };
 
-static int fio_syncio_getevents(struct thread_data *td, int fio_unused min,
-				int max, struct timespec fio_unused *t)
+static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
 {
-	assert(max <= 1);
+	struct fio_file *f = io_u->file;
 
-	/*
-	 * we can only have one finished io_u for sync io, since the depth
-	 * is always 1
-	 */
-	if (list_empty(&td->io_u_busylist))
+	if (ddir_sync(io_u->ddir))
 		return 0;
 
-	return 1;
+	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
+		td_verror(td, errno, "lseek");
+		return 1;
+	}
+
+	return 0;
 }
 
-static struct io_u *fio_syncio_event(struct thread_data *td, int event)
+static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
 {
-	struct syncio_data *sd = td->io_ops->data;
+	if (ret != (int) io_u->xfer_buflen) {
+		if (ret >= 0) {
+			io_u->resid = io_u->xfer_buflen - ret;
+			io_u->error = 0;
+			return FIO_Q_COMPLETED;
+		} else
+			io_u->error = errno;
+	}
 
-	assert(event == 0);
+	if (io_u->error)
+		td_verror(td, io_u->error, "xfer");
 
-	return sd->last_io_u;
+	return FIO_Q_COMPLETED;
 }
 
-static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
+static int fio_psyncio_queue(struct thread_data *td, struct io_u *io_u)
 {
 	struct fio_file *f = io_u->file;
+	int ret;
 
-	if (io_u->ddir == DDIR_SYNC)
-		return 0;
-	if (io_u->offset == f->last_completed_pos)
-		return 0;
+	fio_ro_check(td, io_u);
 
-	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
-		td_verror(td, errno);
-		return 1;
-	}
+	if (io_u->ddir == DDIR_READ)
+		ret = pread(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
+	else if (io_u->ddir == DDIR_WRITE)
+		ret = pwrite(f->fd, io_u->xfer_buf, io_u->xfer_buflen, io_u->offset);
+	else
+		ret = fsync(f->fd);
 
-	return 0;
+	return fio_io_end(td, io_u, ret);
 }
 
 static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
 {
-	struct syncio_data *sd = td->io_ops->data;
 	struct fio_file *f = io_u->file;
-	unsigned int ret;
+	int ret;
+
+	fio_ro_check(td, io_u);
 
 	if (io_u->ddir == DDIR_READ)
-		ret = read(f->fd, io_u->buf, io_u->buflen);
+		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
 	else if (io_u->ddir == DDIR_WRITE)
-		ret = write(f->fd, io_u->buf, io_u->buflen);
+		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
 	else
 		ret = fsync(f->fd);
 
-	if (ret != io_u->buflen) {
-		if (ret > 0) {
-			io_u->resid = io_u->buflen - ret;
-			io_u->error = EIO;
-		} else
-			io_u->error = errno;
+	return fio_io_end(td, io_u, ret);
+}
+
+static int fio_vsyncio_getevents(struct thread_data *td, unsigned int min,
+				 unsigned int max,
+				 struct timespec fio_unused *t)
+{
+	struct syncio_data *sd = td->io_ops->data;
+	int ret;
+
+	if (min) {
+		ret = sd->events;
+		sd->events = 0;
+	} else
+		ret = 0;
+
+	dprint(FD_IO, "vsyncio_getevents: min=%d,max=%d: %d\n", min, max, ret);
+	return ret;
+}
+
+static struct io_u *fio_vsyncio_event(struct thread_data *td, int event)
+{
+	struct syncio_data *sd = td->io_ops->data;
+
+	return sd->io_us[event];
+}
+
+static int fio_vsyncio_append(struct thread_data *td, struct io_u *io_u)
+{
+	struct syncio_data *sd = td->io_ops->data;
+
+	if (ddir_sync(io_u->ddir))
+		return 0;
+
+	if (io_u->offset == sd->last_offset && io_u->file == sd->last_file &&
+	    io_u->ddir == sd->last_ddir)
+		return 1;
+
+	return 0;
+}
+
+static void fio_vsyncio_set_iov(struct syncio_data *sd, struct io_u *io_u,
+				int index)
+{
+	sd->io_us[index] = io_u;
+	sd->iovecs[index].iov_base = io_u->xfer_buf;
+	sd->iovecs[index].iov_len = io_u->xfer_buflen;
+	sd->last_offset = io_u->offset + io_u->xfer_buflen;
+	sd->last_file = io_u->file;
+	sd->last_ddir = io_u->ddir;
+	sd->queued_bytes += io_u->xfer_buflen;
+	sd->queued++;
+}
+
+static int fio_vsyncio_queue(struct thread_data *td, struct io_u *io_u)
+{
+	struct syncio_data *sd = td->io_ops->data;
+
+	fio_ro_check(td, io_u);
+
+	if (!fio_vsyncio_append(td, io_u)) {
+		dprint(FD_IO, "vsyncio_queue: no append (%d)\n", sd->queued);
+		/*
+		 * If we can't append and have stuff queued, tell fio to
+		 * commit those first and then retry this io
+		 */
+		if (sd->queued)
+			return FIO_Q_BUSY;
+		if (io_u->ddir == DDIR_SYNC) {
+			int ret = fsync(io_u->file->fd);
+
+			return fio_io_end(td, io_u, ret);
+		} else if (io_u->ddir == DDIR_DATASYNC) {
+			int ret = fdatasync(io_u->file->fd);
+
+			return fio_io_end(td, io_u, ret);
+		}
+
+		sd->queued = 0;
+		sd->queued_bytes = 0;
+		fio_vsyncio_set_iov(sd, io_u, 0);
+	} else {
+		if (sd->queued == td->o.iodepth) {
+			dprint(FD_IO, "vsyncio_queue: max depth %d\n", sd->queued);
+			return FIO_Q_BUSY;
+		}
+
+		dprint(FD_IO, "vsyncio_queue: append\n");
+		fio_vsyncio_set_iov(sd, io_u, sd->queued);
+	}
+
+	dprint(FD_IO, "vsyncio_queue: depth now %d\n", sd->queued);
+	return FIO_Q_QUEUED;
+}
+
+/*
+ * Check that we transferred all bytes, or saw an error, etc
+ */
+static int fio_vsyncio_end(struct thread_data *td, ssize_t bytes)
+{
+	struct syncio_data *sd = td->io_ops->data;
+	struct io_u *io_u;
+	unsigned int i;
+	int err;
+
+	/*
+	 * transferred everything, perfect
+	 */
+	if (bytes == sd->queued_bytes)
+		return 0;
+
+	err = errno;
+	for (i = 0; i < sd->queued; i++) {
+		io_u = sd->io_us[i];
+
+		if (bytes == -1) {
+			io_u->error = err;
+		} else {
+			unsigned int this_io;
+
+			this_io = bytes;
+			if (this_io > io_u->xfer_buflen)
+				this_io = io_u->xfer_buflen;
+
+			io_u->resid = io_u->xfer_buflen - this_io;
+			io_u->error = 0;
+			bytes -= this_io;
+		}
 	}
 
-	if (!io_u->error)
-		sd->last_io_u = io_u;
+	if (bytes == -1) {
+		td_verror(td, err, "xfer vsync");
+		return -err;
+	}
 
-	return io_u->error;
+	return 0;
 }
 
-static void fio_syncio_cleanup(struct thread_data *td)
+static int fio_vsyncio_commit(struct thread_data *td)
 {
-	if (td->io_ops->data) {
-		free(td->io_ops->data);
-		td->io_ops->data = NULL;
+	struct syncio_data *sd = td->io_ops->data;
+	struct fio_file *f;
+	ssize_t ret;
+
+	if (!sd->queued)
+		return 0;
+
+	io_u_mark_submit(td, sd->queued);
+	f = sd->last_file;
+
+	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
+		int err = -errno;
+
+		td_verror(td, errno, "lseek");
+		return err;
 	}
+
+	if (sd->last_ddir == DDIR_READ)
+		ret = readv(f->fd, sd->iovecs, sd->queued);
+	else
+		ret = writev(f->fd, sd->iovecs, sd->queued);
+
+	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
+	sd->events = sd->queued;
+	sd->queued = 0;
+	return fio_vsyncio_end(td, ret);
 }
 
-static int fio_syncio_init(struct thread_data *td)
+static int fio_vsyncio_init(struct thread_data *td)
 {
-	struct syncio_data *sd = malloc(sizeof(*sd));
+	struct syncio_data *sd;
+
+	sd = malloc(sizeof(*sd));
+	memset(sd, 0, sizeof(*sd));
+	sd->last_offset = -1ULL;
+	sd->iovecs = malloc(td->o.iodepth * sizeof(struct iovec));
+	sd->io_us = malloc(td->o.iodepth * sizeof(struct io_u *));
 
-	sd->last_io_u = NULL;
 	td->io_ops->data = sd;
 	return 0;
 }
 
-static struct ioengine_ops ioengine = {
+static void fio_vsyncio_cleanup(struct thread_data *td)
+{
+	struct syncio_data *sd = td->io_ops->data;
+
+	free(sd->iovecs);
+	free(sd->io_us);
+	free(sd);
+}
+
+static struct ioengine_ops ioengine_rw = {
 	.name		= "sync",
 	.version	= FIO_IOOPS_VERSION,
-	.init		= fio_syncio_init,
 	.prep		= fio_syncio_prep,
 	.queue		= fio_syncio_queue,
-	.getevents	= fio_syncio_getevents,
-	.event		= fio_syncio_event,
-	.cleanup	= fio_syncio_cleanup,
+	.open_file	= generic_open_file,
+	.close_file	= generic_close_file,
+	.get_file_size	= generic_get_file_size,
+	.flags		= FIO_SYNCIO,
+};
+
+static struct ioengine_ops ioengine_prw = {
+	.name		= "psync",
+	.version	= FIO_IOOPS_VERSION,
+	.queue		= fio_psyncio_queue,
+	.open_file	= generic_open_file,
+	.close_file	= generic_close_file,
+	.get_file_size	= generic_get_file_size,
+	.flags		= FIO_SYNCIO,
+};
+
+static struct ioengine_ops ioengine_vrw = {
+	.name		= "vsync",
+	.version	= FIO_IOOPS_VERSION,
+	.init		= fio_vsyncio_init,
+	.cleanup	= fio_vsyncio_cleanup,
+	.queue		= fio_vsyncio_queue,
+	.commit		= fio_vsyncio_commit,
+	.event		= fio_vsyncio_event,
+	.getevents	= fio_vsyncio_getevents,
+	.open_file	= generic_open_file,
+	.close_file	= generic_close_file,
+	.get_file_size	= generic_get_file_size,
 	.flags		= FIO_SYNCIO,
 };
 
 static void fio_init fio_syncio_register(void)
 {
-	register_ioengine(&ioengine);
+	register_ioengine(&ioengine_rw);
+	register_ioengine(&ioengine_prw);
+	register_ioengine(&ioengine_vrw);
 }
 
 static void fio_exit fio_syncio_unregister(void)
 {
-	unregister_ioengine(&ioengine);
+	unregister_ioengine(&ioengine_rw);
+	unregister_ioengine(&ioengine_prw);
+	unregister_ioengine(&ioengine_vrw);
 }
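For reference, a minimal job file sketch that exercises the new vsync engine. This is not part of the patch; the job name, file name, and sizes below are made up for illustration. With a sequential workload and an iodepth greater than 1, fio_vsyncio_queue() keeps appending contiguous io_us for the same file and data direction, and fio_vsyncio_commit() then submits the whole batch with a single readv(2)/writev(2):

; hypothetical example job, not part of the patch
[vsync-seq-read]
ioengine=vsync
rw=read
bs=4k
iodepth=16
size=64m
filename=/tmp/fio.vsync.test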