OBJS = gettime.o fio.o ioengines.o init.o stat.o log.o time.o md5.o crc32.o \
filesetup.o eta.o verify.o memory.o io_u.o parse.o
-OBJS += engines/fio-engine-cpu.o
-OBJS += engines/fio-engine-libaio.o
-OBJS += engines/fio-engine-mmap.o
-OBJS += engines/fio-engine-posixaio.o
-OBJS += engines/fio-engine-sg.o
-OBJS += engines/fio-engine-splice.o
-OBJS += engines/fio-engine-sync.o
-OBJS += engines/fio-engine-null.o
+OBJS += engines/cpu.o
+OBJS += engines/libaio.o
+OBJS += engines/mmap.o
+OBJS += engines/posixaio.o
+OBJS += engines/sg.o
+OBJS += engines/splice.o
+OBJS += engines/sync.o
+OBJS += engines/null.o
INSTALL = install
prefix = /usr/local
OBJS = gettime.o fio.o ioengines.o init.o stat.o log.o time.o md5.o crc32.o \
filesetup.o eta.o verify.o memory.o io_u.o parse.o
-OBJS += engines/fio-engine-cpu.o
-OBJS += engines/fio-engine-mmap.o
-OBJS += engines/fio-engine-posixaio.o
-OBJS += engines/fio-engine-sync.o
-OBJS += engines/fio-engine-null.o
+OBJS += engines/cpu.o
+OBJS += engines/mmap.o
+OBJS += engines/posixaio.o
+OBJS += engines/sync.o
+OBJS += engines/null.o
all: depend $(PROGS) $(SCRIPTS)
OBJS = gettime.o fio.o ioengines.o init.o stat.o log.o time.o md5.o crc32.o \
filesetup.o eta.o verify.o memory.o io_u.o parse.o
-OBJS += engines/fio-engine-cpu.o
-OBJS += engines/fio-engine-mmap.o
-OBJS += engines/fio-engine-posixaio.o
-OBJS += engines/fio-engine-sync.o
-OBJS += engines/fio-engine-null.o
+OBJS += engines/cpu.o
+OBJS += engines/mmap.o
+OBJS += engines/posixaio.o
+OBJS += engines/sync.o
+OBJS += engines/null.o
all: depend $(PROGS) $(SCRIPTS)
--- /dev/null
+#include "../fio.h"
+#include "../os.h"
+
+static int fio_cpuio_setup(struct thread_data fio_unused *td)
+{
+ return 0;
+}
+
+static int fio_cpuio_init(struct thread_data *td)
+{
+ if (!td->cpuload) {
+ td_vmsg(td, EINVAL, "cpu thread needs rate");
+ return 1;
+ } else if (td->cpuload > 100)
+ td->cpuload = 100;
+
+ td->nr_files = 0;
+
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "cpuio",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_cpuio_init,
+ .setup = fio_cpuio_setup,
+ .flags = FIO_CPUIO,
+};
+
+static void fio_init fio_cpuio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_cpuio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
+++ /dev/null
-#include "../fio.h"
-#include "../os.h"
-
-static int fio_cpuio_setup(struct thread_data fio_unused *td)
-{
- return 0;
-}
-
-static int fio_cpuio_init(struct thread_data *td)
-{
- if (!td->cpuload) {
- td_vmsg(td, EINVAL, "cpu thread needs rate");
- return 1;
- } else if (td->cpuload > 100)
- td->cpuload = 100;
-
- td->nr_files = 0;
-
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "cpuio",
- .version = FIO_IOOPS_VERSION,
- .init = fio_cpuio_init,
- .setup = fio_cpuio_setup,
- .flags = FIO_CPUIO,
-};
-
-static void fio_init fio_cpuio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_cpuio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * native linux aio io engine
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-#ifdef FIO_HAVE_LIBAIO
-
-#define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
-
-struct libaio_data {
- io_context_t aio_ctx;
- struct io_event *aio_events;
-};
-
-static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
-{
- struct fio_file *f = io_u->file;
-
- if (io_u->ddir == DDIR_READ)
- io_prep_pread(&io_u->iocb, f->fd, io_u->buf, io_u->buflen, io_u->offset);
- else if (io_u->ddir == DDIR_WRITE)
- io_prep_pwrite(&io_u->iocb, f->fd, io_u->buf, io_u->buflen, io_u->offset);
- else if (io_u->ddir == DDIR_SYNC)
- io_prep_fsync(&io_u->iocb, f->fd);
- else
- return 1;
-
- return 0;
-}
-
-static struct io_u *fio_libaio_event(struct thread_data *td, int event)
-{
- struct libaio_data *ld = td->io_ops->data;
-
- return ev_to_iou(ld->aio_events + event);
-}
-
-static int fio_libaio_getevents(struct thread_data *td, int min, int max,
- struct timespec *t)
-{
- struct libaio_data *ld = td->io_ops->data;
- long r;
-
- do {
- r = io_getevents(ld->aio_ctx, min, max, ld->aio_events, t);
- if (r >= min)
- break;
- else if (r == -EAGAIN) {
- usleep(100);
- continue;
- } else if (r == -EINTR)
- continue;
- else if (r != 0)
- break;
- } while (1);
-
- if (r < 0)
- r = -r;
-
- return (int) r;
-}
-
-static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
-{
- struct libaio_data *ld = td->io_ops->data;
- struct iocb *iocb = &io_u->iocb;
- long ret;
-
- do {
- ret = io_submit(ld->aio_ctx, 1, &iocb);
- if (ret == 1)
- return 0;
- else if (ret == -EAGAIN || !ret)
- usleep(100);
- else if (ret == -EINTR)
- continue;
- else
- break;
- } while (1);
-
- if (ret <= 0) {
- io_u->resid = io_u->buflen;
- io_u->error = -ret;
- return 1;
- }
-
- return 0;
-}
-
-static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
-{
- struct libaio_data *ld = td->io_ops->data;
-
- return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
-}
-
-static void fio_libaio_cleanup(struct thread_data *td)
-{
- struct libaio_data *ld = td->io_ops->data;
-
- if (ld) {
- io_destroy(ld->aio_ctx);
- if (ld->aio_events)
- free(ld->aio_events);
-
- free(ld);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_libaio_init(struct thread_data *td)
-{
- struct libaio_data *ld = malloc(sizeof(*ld));
-
- memset(ld, 0, sizeof(*ld));
- if (io_queue_init(td->iodepth, &ld->aio_ctx)) {
- td_verror(td, errno);
- free(ld);
- return 1;
- }
-
- ld->aio_events = malloc(td->iodepth * sizeof(struct io_event));
- memset(ld->aio_events, 0, td->iodepth * sizeof(struct io_event));
- td->io_ops->data = ld;
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "libaio",
- .version = FIO_IOOPS_VERSION,
- .init = fio_libaio_init,
- .prep = fio_libaio_prep,
- .queue = fio_libaio_queue,
- .cancel = fio_libaio_cancel,
- .getevents = fio_libaio_getevents,
- .event = fio_libaio_event,
- .cleanup = fio_libaio_cleanup,
-};
-
-#else /* FIO_HAVE_LIBAIO */
-
-/*
- * When we have a proper configure system in place, we simply wont build
- * and install this io engine. For now install a crippled version that
- * just complains and fails to load.
- */
-static int fio_libaio_init(struct thread_data fio_unused *td)
-{
- fprintf(stderr, "fio: libaio not available\n");
- return 1;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "libaio",
- .version = FIO_IOOPS_VERSION,
- .init = fio_libaio_init,
-};
-
-#endif
-
-static void fio_init fio_libaio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_libaio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * regular read/write sync io engine
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-#include <sys/mman.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-struct mmapio_data {
- struct io_u *last_io_u;
-};
-
-static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- /*
- * we can only have one finished io_u for sync io, since the depth
- * is always 1
- */
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
-{
- struct mmapio_data *sd = td->io_ops->data;
-
- assert(event == 0);
-
- return sd->last_io_u;
-}
-
-
-static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
-{
- struct fio_file *f = io_u->file;
- unsigned long long real_off = io_u->offset - f->file_offset;
- struct mmapio_data *sd = td->io_ops->data;
-
- if (io_u->ddir == DDIR_READ)
- memcpy(io_u->buf, f->mmap + real_off, io_u->buflen);
- else if (io_u->ddir == DDIR_WRITE)
- memcpy(f->mmap + real_off, io_u->buf, io_u->buflen);
- else if (io_u->ddir == DDIR_SYNC) {
- if (msync(f->mmap, f->file_size, MS_SYNC))
- io_u->error = errno;
- }
-
- /*
- * not really direct, but should drop the pages from the cache
- */
- if (td->odirect && io_u->ddir != DDIR_SYNC) {
- if (msync(f->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
- io_u->error = errno;
- if (madvise(f->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
- io_u->error = errno;
- }
-
- if (!io_u->error)
- sd->last_io_u = io_u;
-
- return io_u->error;
-}
-
-static void fio_mmapio_cleanup(struct thread_data *td)
-{
- if (td->io_ops->data) {
- free(td->io_ops->data);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_mmapio_init(struct thread_data *td)
-{
- struct mmapio_data *sd = malloc(sizeof(*sd));
-
- sd->last_io_u = NULL;
- td->io_ops->data = sd;
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "mmap",
- .version = FIO_IOOPS_VERSION,
- .init = fio_mmapio_init,
- .queue = fio_mmapio_queue,
- .getevents = fio_mmapio_getevents,
- .event = fio_mmapio_event,
- .cleanup = fio_mmapio_cleanup,
- .flags = FIO_SYNCIO | FIO_MMAPIO,
-};
-
-static void fio_init fio_mmapio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_mmapio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * null engine - doesn't do any transfers. Used to test fio.
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-struct null_data {
- struct io_u *last_io_u;
-};
-
-static int fio_null_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-static struct io_u *fio_null_event(struct thread_data *td, int event)
-{
- struct null_data *nd = td->io_ops->data;
-
- assert(event == 0);
-
- return nd->last_io_u;
-}
-
-static int fio_null_queue(struct thread_data *td, struct io_u *io_u)
-{
- struct null_data *nd = td->io_ops->data;
-
- io_u->resid = 0;
- io_u->error = 0;
- nd->last_io_u = io_u;
- return 0;
-}
-
-static void fio_null_cleanup(struct thread_data *td)
-{
- if (td->io_ops->data) {
- free(td->io_ops->data);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_null_init(struct thread_data *td)
-{
- struct null_data *nd = malloc(sizeof(*nd));
-
- nd->last_io_u = NULL;
- td->io_ops->data = nd;
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "null",
- .version = FIO_IOOPS_VERSION,
- .init = fio_null_init,
- .queue = fio_null_queue,
- .getevents = fio_null_getevents,
- .event = fio_null_event,
- .cleanup = fio_null_cleanup,
- .flags = FIO_SYNCIO,
-};
-
-static void fio_init fio_null_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_null_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * posix aio io engine
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-#ifdef FIO_HAVE_POSIXAIO
-
-struct posixaio_data {
- struct io_u **aio_events;
-};
-
-static int fill_timespec(struct timespec *ts)
-{
-#ifdef _POSIX_TIMERS
- if (!clock_gettime(CLOCK_MONOTONIC, ts))
- return 0;
-
- perror("clock_gettime");
-#endif
- return 1;
-}
-
-static unsigned long long ts_utime_since_now(struct timespec *t)
-{
- long long sec, nsec;
- struct timespec now;
-
- if (fill_timespec(&now))
- return 0;
-
- sec = now.tv_sec - t->tv_sec;
- nsec = now.tv_nsec - t->tv_nsec;
- if (sec > 0 && nsec < 0) {
- sec--;
- nsec += 1000000000;
- }
-
- sec *= 1000000;
- nsec /= 1000;
- return sec + nsec;
-}
-
-static int fio_posixaio_cancel(struct thread_data fio_unused *td,
- struct io_u *io_u)
-{
- struct fio_file *f = io_u->file;
- int r = aio_cancel(f->fd, &io_u->aiocb);
-
- if (r == 1 || r == AIO_CANCELED)
- return 0;
-
- return 1;
-}
-
-static int fio_posixaio_prep(struct thread_data fio_unused *td,
- struct io_u *io_u)
-{
- struct aiocb *aiocb = &io_u->aiocb;
- struct fio_file *f = io_u->file;
-
- aiocb->aio_fildes = f->fd;
- aiocb->aio_buf = io_u->buf;
- aiocb->aio_nbytes = io_u->buflen;
- aiocb->aio_offset = io_u->offset;
-
- io_u->seen = 0;
- return 0;
-}
-
-static int fio_posixaio_getevents(struct thread_data *td, int min, int max,
- struct timespec *t)
-{
- struct posixaio_data *pd = td->io_ops->data;
- struct list_head *entry;
- struct timespec start;
- int r, have_timeout = 0;
-
- if (t && !fill_timespec(&start))
- have_timeout = 1;
-
- r = 0;
-restart:
- list_for_each(entry, &td->io_u_busylist) {
- struct io_u *io_u = list_entry(entry, struct io_u, list);
- int err;
-
- if (io_u->seen)
- continue;
-
- err = aio_error(&io_u->aiocb);
- switch (err) {
- default:
- io_u->error = err;
- case ECANCELED:
- case 0:
- pd->aio_events[r++] = io_u;
- io_u->seen = 1;
- break;
- case EINPROGRESS:
- break;
- }
-
- if (r >= max)
- break;
- }
-
- if (r >= min)
- return r;
-
- if (have_timeout) {
- unsigned long long usec;
-
- usec = (t->tv_sec * 1000000) + (t->tv_nsec / 1000);
- if (ts_utime_since_now(&start) > usec)
- return r;
- }
-
- /*
- * hrmpf, we need to wait for more. we should use aio_suspend, for
- * now just sleep a little and recheck status of busy-and-not-seen
- */
- usleep(1000);
- goto restart;
-}
-
-static struct io_u *fio_posixaio_event(struct thread_data *td, int event)
-{
- struct posixaio_data *pd = td->io_ops->data;
-
- return pd->aio_events[event];
-}
-
-static int fio_posixaio_queue(struct thread_data fio_unused *td,
- struct io_u *io_u)
-{
- struct aiocb *aiocb = &io_u->aiocb;
- int ret;
-
- if (io_u->ddir == DDIR_READ)
- ret = aio_read(aiocb);
- else if (io_u->ddir == DDIR_WRITE)
- ret = aio_write(aiocb);
- else
- ret = aio_fsync(O_SYNC, aiocb);
-
- if (ret)
- io_u->error = errno;
-
- return io_u->error;
-}
-
-static void fio_posixaio_cleanup(struct thread_data *td)
-{
- struct posixaio_data *pd = td->io_ops->data;
-
- if (pd) {
- free(pd->aio_events);
- free(pd);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_posixaio_init(struct thread_data *td)
-{
- struct posixaio_data *pd = malloc(sizeof(*pd));
-
- memset(pd, 0, sizeof(*pd));
- pd->aio_events = malloc(td->iodepth * sizeof(struct io_u *));
- memset(pd->aio_events, 0, td->iodepth * sizeof(struct io_u *));
-
- td->io_ops->data = pd;
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "posixaio",
- .version = FIO_IOOPS_VERSION,
- .init = fio_posixaio_init,
- .prep = fio_posixaio_prep,
- .queue = fio_posixaio_queue,
- .cancel = fio_posixaio_cancel,
- .getevents = fio_posixaio_getevents,
- .event = fio_posixaio_event,
- .cleanup = fio_posixaio_cleanup,
-};
-
-#else /* FIO_HAVE_POSIXAIO */
-
-/*
- * When we have a proper configure system in place, we simply wont build
- * and install this io engine. For now install a crippled version that
- * just complains and fails to load.
- */
-static int fio_posixaio_init(struct thread_data fio_unused *td)
-{
- fprintf(stderr, "fio: posixaio not available\n");
- return 1;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "posixaio",
- .version = FIO_IOOPS_VERSION,
- .init = fio_posixaio_init,
-};
-
-#endif
-
-static void fio_init fio_posixaio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_posixaio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * scsi generic sg v3 io engine
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-#include <sys/poll.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-#ifdef FIO_HAVE_SGIO
-
-struct sgio_cmd {
- unsigned char cdb[10];
- int nr;
-};
-
-struct sgio_data {
- struct sgio_cmd *cmds;
- struct io_u **events;
- unsigned int bs;
-};
-
-static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
- struct io_u *io_u, int fs)
-{
- struct sgio_cmd *sc = &sd->cmds[io_u->index];
-
- memset(hdr, 0, sizeof(*hdr));
- memset(sc->cdb, 0, sizeof(sc->cdb));
-
- hdr->interface_id = 'S';
- hdr->cmdp = sc->cdb;
- hdr->cmd_len = sizeof(sc->cdb);
- hdr->pack_id = io_u->index;
- hdr->usr_ptr = io_u;
-
- if (fs) {
- hdr->dxferp = io_u->buf;
- hdr->dxfer_len = io_u->buflen;
- }
-}
-
-static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- /*
- * we can only have one finished io_u for sync io, since the depth
- * is always 1
- */
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-
-static int fio_sgio_getevents(struct thread_data *td, int min, int max,
- struct timespec fio_unused *t)
-{
- struct fio_file *f = &td->files[0];
- struct sgio_data *sd = td->io_ops->data;
- struct pollfd pfd = { .fd = f->fd, .events = POLLIN };
- void *buf = malloc(max * sizeof(struct sg_io_hdr));
- int left = max, ret, events, i, r = 0, fl = 0;
-
- /*
- * don't block for !events
- */
- if (!min) {
- fl = fcntl(f->fd, F_GETFL);
- fcntl(f->fd, F_SETFL, fl | O_NONBLOCK);
- }
-
- while (left) {
- do {
- if (!min)
- break;
- poll(&pfd, 1, -1);
- if (pfd.revents & POLLIN)
- break;
- } while (1);
-
- ret = read(f->fd, buf, left * sizeof(struct sg_io_hdr));
- if (ret < 0) {
- if (errno == EAGAIN)
- break;
- td_verror(td, errno);
- r = -1;
- break;
- } else if (!ret)
- break;
-
- events = ret / sizeof(struct sg_io_hdr);
- left -= events;
- r += events;
-
- for (i = 0; i < events; i++) {
- struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
-
- sd->events[i] = hdr->usr_ptr;
- }
- }
-
- if (!min)
- fcntl(f->fd, F_SETFL, fl);
-
- free(buf);
- return r;
-}
-
-static int fio_sgio_ioctl_doio(struct thread_data *td,
- struct fio_file *f, struct io_u *io_u)
-{
- struct sgio_data *sd = td->io_ops->data;
- struct sg_io_hdr *hdr = &io_u->hdr;
-
- sd->events[0] = io_u;
-
- return ioctl(f->fd, SG_IO, hdr);
-}
-
-static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
-{
- struct sg_io_hdr *hdr = &io_u->hdr;
- int ret;
-
- ret = write(f->fd, hdr, sizeof(*hdr));
- if (ret < 0)
- return errno;
-
- if (sync) {
- ret = read(f->fd, hdr, sizeof(*hdr));
- if (ret < 0)
- return errno;
- }
-
- return 0;
-}
-
-static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
-{
- struct fio_file *f = io_u->file;
-
- if (td->filetype == FIO_TYPE_BD)
- return fio_sgio_ioctl_doio(td, f, io_u);
-
- return fio_sgio_rw_doio(f, io_u, sync);
-}
-
-static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
-{
- struct sg_io_hdr *hdr = &io_u->hdr;
- struct sgio_data *sd = td->io_ops->data;
- int nr_blocks, lba;
-
- if (io_u->buflen & (sd->bs - 1)) {
- log_err("read/write not sector aligned\n");
- return EINVAL;
- }
-
- if (io_u->ddir == DDIR_READ) {
- sgio_hdr_init(sd, hdr, io_u, 1);
-
- hdr->dxfer_direction = SG_DXFER_FROM_DEV;
- hdr->cmdp[0] = 0x28;
- } else if (io_u->ddir == DDIR_WRITE) {
- sgio_hdr_init(sd, hdr, io_u, 1);
-
- hdr->dxfer_direction = SG_DXFER_TO_DEV;
- hdr->cmdp[0] = 0x2a;
- } else {
- sgio_hdr_init(sd, hdr, io_u, 0);
-
- hdr->dxfer_direction = SG_DXFER_NONE;
- hdr->cmdp[0] = 0x35;
- }
-
- if (hdr->dxfer_direction != SG_DXFER_NONE) {
- nr_blocks = io_u->buflen / sd->bs;
- lba = io_u->offset / sd->bs;
- hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
- hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
- hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
- hdr->cmdp[5] = (unsigned char) (lba & 0xff);
- hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
- hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
- }
-
- return 0;
-}
-
-static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
-{
- struct sg_io_hdr *hdr = &io_u->hdr;
- int ret;
-
- ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
-
- if (ret < 0)
- io_u->error = errno;
- else if (hdr->status) {
- io_u->resid = hdr->resid;
- io_u->error = EIO;
- }
-
- return io_u->error;
-}
-
-static struct io_u *fio_sgio_event(struct thread_data *td, int event)
-{
- struct sgio_data *sd = td->io_ops->data;
-
- return sd->events[event];
-}
-
-static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
-{
- struct sgio_data *sd = td->io_ops->data;
- struct io_u *io_u;
- struct sg_io_hdr *hdr;
- unsigned char buf[8];
- int ret;
-
- io_u = __get_io_u(td);
- assert(io_u);
-
- hdr = &io_u->hdr;
- sgio_hdr_init(sd, hdr, io_u, 0);
- memset(buf, 0, sizeof(buf));
-
- hdr->cmdp[0] = 0x25;
- hdr->dxfer_direction = SG_DXFER_FROM_DEV;
- hdr->dxferp = buf;
- hdr->dxfer_len = sizeof(buf);
-
- ret = fio_sgio_doio(td, io_u, 1);
- if (ret) {
- put_io_u(td, io_u);
- return ret;
- }
-
- *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
- put_io_u(td, io_u);
- return 0;
-}
-
-static void fio_sgio_cleanup(struct thread_data *td)
-{
- if (td->io_ops->data) {
- free(td->io_ops->data);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_sgio_init(struct thread_data *td)
-{
- struct fio_file *f = &td->files[0];
- struct sgio_data *sd;
- unsigned int bs;
- int ret;
-
- sd = malloc(sizeof(*sd));
- memset(sd, 0, sizeof(*sd));
- sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
- memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
- sd->events = malloc(td->iodepth * sizeof(struct io_u *));
- memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
- td->io_ops->data = sd;
-
- if (td->filetype == FIO_TYPE_BD) {
- if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
- td_verror(td, errno);
- goto err;
- }
- } else if (td->filetype == FIO_TYPE_CHAR) {
- int version;
-
- if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
- td_verror(td, errno);
- goto err;
- }
-
- ret = fio_sgio_get_bs(td, &bs);
- if (ret)
- goto err;
- } else {
- log_err("ioengine sgio only works on block devices\n");
- goto err;
- }
-
- sd->bs = bs;
-
- if (td->filetype == FIO_TYPE_BD)
- td->io_ops->getevents = fio_sgio_ioctl_getevents;
- else
- td->io_ops->getevents = fio_sgio_getevents;
-
- /*
- * we want to do it, regardless of whether odirect is set or not
- */
- td->override_sync = 1;
- return 0;
-err:
- free(sd->events);
- free(sd->cmds);
- free(sd);
- td->io_ops->data = NULL;
- return 1;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "sg",
- .version = FIO_IOOPS_VERSION,
- .init = fio_sgio_init,
- .prep = fio_sgio_prep,
- .queue = fio_sgio_queue,
- .getevents = fio_sgio_getevents,
- .event = fio_sgio_event,
- .cleanup = fio_sgio_cleanup,
- .flags = FIO_SYNCIO | FIO_RAWIO,
-};
-
-#else /* FIO_HAVE_SGIO */
-
-/*
- * When we have a proper configure system in place, we simply wont build
- * and install this io engine. For now install a crippled version that
- * just complains and fails to load.
- */
-static int fio_sgio_init(struct thread_data fio_unused *td)
-{
- fprintf(stderr, "fio: sgio not available\n");
- return 1;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "sgio",
- .version = FIO_IOOPS_VERSION,
- .init = fio_sgio_init,
-};
-
-#endif
-
-static void fio_init fio_sgio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_sgio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * Skeleton for a sample external io engine
- *
- * Should be compiled with:
- *
- * gcc -Wall -O2 -g -shared -rdynamic -fPIC -o engine.o engine.c
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-/*
- * The core of the module is identical to the ones included with fio,
- * read those. You cannot use register_ioengine() and unregister_ioengine()
- * for external modules, they should be gotten through dlsym()
- */
-
-/*
- * The ->event() hook is called to match an event number with an io_u.
- * After the core has called ->getevents() and it has returned eg 3,
- * the ->event() hook must return the 3 events that have completed for
- * subsequent calls to ->event() with [0-2]. Required.
- */
-static struct io_u *fio_skeleton_event(struct thread_data *td, int event)
-{
- return NULL;
-}
-
-/*
- * The ->getevents() hook is used to reap completion events from an async
- * io engine. It returns the number of completed events since the last call,
- * which may then be retrieved by calling the ->event() hook with the event
- * numbers. Required.
- */
-static int fio_skeleton_getevents(struct thread_data *td, int min, int max,
- struct timespec *t)
-{
- return 0;
-}
-
-/*
- * The ->cancel() hook attempts to cancel the io_u. Only relevant for
- * async io engines, and need not be supported.
- */
-static int fio_skeleton_cancel(struct thread_data *td, struct io_u *io_u)
-{
- return 0;
-}
-
-/*
- * The ->queue() hook is responsible for initiating io on the io_u
- * being passed in. If the io engine is a synchronous one, io may complete
- * before ->queue() returns. Required.
- */
-static int fio_skeleton_queue(struct thread_data *td, struct io_u *io_u)
-{
- return 0;
-}
-
-/*
- * The ->prep() function is called for each io_u prior to being submitted
- * with ->queue(). This hook allows the io engine to perform any
- * preperatory actions on the io_u, before being submitted. Not required.
- */
-static int fio_skeleton_prep(struct thread_data *td, struct io_u *io_u)
-{
- return 0;
-}
-
-/*
- * The init function is called once per thread/process, and should set up
- * any structures that this io engine requires to keep track of io. Not
- * required.
- */
-static int fio_skeleton_init(struct thread_data *td)
-{
- return 0;
-}
-
-/*
- * This is paired with the ->init() funtion and is called when a thread is
- * done doing io. Should tear down anything setup by the ->init() function.
- * Not required.
- */
-static void fio_skeleton_cleanup(struct thread_data *td)
-{
-}
-
-/*
- * Note that the structure is exported, so that fio can get it via
- * dlsym(..., "ioengine");
- */
-struct ioengine_ops ioengine = {
- .name = "engine_name",
- .version = FIO_IOOPS_VERSION,
- .init = fio_skeleton_init,
- .prep = fio_skeleton_prep,
- .queue = fio_skeleton_queue,
- .cancel = fio_skeleton_cancel,
- .getevents = fio_skeleton_getevents,
- .event = fio_skeleton_event,
- .cleanup = fio_skeleton_cleanup,
-};
+++ /dev/null
-/*
- * splice io engine
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-#include <sys/poll.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-#ifdef FIO_HAVE_SPLICE
-
-struct spliceio_data {
- struct io_u *last_io_u;
- int pipe[2];
-};
-
-static int fio_spliceio_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- /*
- * we can only have one finished io_u for sync io, since the depth
- * is always 1
- */
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-static struct io_u *fio_spliceio_event(struct thread_data *td, int event)
-{
- struct spliceio_data *sd = td->io_ops->data;
-
- assert(event == 0);
-
- return sd->last_io_u;
-}
-
-/*
- * For splice reading, we unfortunately cannot (yet) vmsplice the other way.
- * So just splice the data from the file into the pipe, and use regular
- * read to fill the buffer. Doesn't make a lot of sense, but...
- */
-static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
-{
- struct spliceio_data *sd = td->io_ops->data;
- struct fio_file *f = io_u->file;
- int ret, ret2, buflen;
- off_t offset;
- void *p;
-
- offset = io_u->offset;
- buflen = io_u->buflen;
- p = io_u->buf;
- while (buflen) {
- int this_len = buflen;
-
- if (this_len > SPLICE_DEF_SIZE)
- this_len = SPLICE_DEF_SIZE;
-
- ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
- if (ret < 0) {
- if (errno == ENODATA || errno == EAGAIN)
- continue;
-
- return errno;
- }
-
- buflen -= ret;
-
- while (ret) {
- ret2 = read(sd->pipe[0], p, ret);
- if (ret2 < 0)
- return errno;
-
- ret -= ret2;
- p += ret2;
- }
- }
-
- return io_u->buflen;
-}
-
-/*
- * For splice writing, we can vmsplice our data buffer directly into a
- * pipe and then splice that to a file.
- */
-static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
-{
- struct spliceio_data *sd = td->io_ops->data;
- struct iovec iov[1] = {
- {
- .iov_base = io_u->buf,
- .iov_len = io_u->buflen,
- }
- };
- struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
- struct fio_file *f = io_u->file;
- off_t off = io_u->offset;
- int ret, ret2;
-
- while (iov[0].iov_len) {
- if (poll(&pfd, 1, -1) < 0)
- return errno;
-
- ret = vmsplice(sd->pipe[1], iov, 1, SPLICE_F_NONBLOCK);
- if (ret < 0)
- return errno;
-
- iov[0].iov_len -= ret;
- iov[0].iov_base += ret;
-
- while (ret) {
- ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
- if (ret2 < 0)
- return errno;
-
- ret -= ret2;
- }
- }
-
- return io_u->buflen;
-}
-
-static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
-{
- struct spliceio_data *sd = td->io_ops->data;
- unsigned int ret;
-
- if (io_u->ddir == DDIR_READ)
- ret = fio_splice_read(td, io_u);
- else if (io_u->ddir == DDIR_WRITE)
- ret = fio_splice_write(td, io_u);
- else
- ret = fsync(io_u->file->fd);
-
- if (ret != io_u->buflen) {
- if (ret > 0) {
- io_u->resid = io_u->buflen - ret;
- io_u->error = ENODATA;
- } else
- io_u->error = errno;
- }
-
- if (!io_u->error)
- sd->last_io_u = io_u;
-
- return io_u->error;
-}
-
-static void fio_spliceio_cleanup(struct thread_data *td)
-{
- struct spliceio_data *sd = td->io_ops->data;
-
- if (sd) {
- close(sd->pipe[0]);
- close(sd->pipe[1]);
- free(sd);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_spliceio_init(struct thread_data *td)
-{
- struct spliceio_data *sd = malloc(sizeof(*sd));
-
- sd->last_io_u = NULL;
- if (pipe(sd->pipe) < 0) {
- td_verror(td, errno);
- free(sd);
- return 1;
- }
-
- td->io_ops->data = sd;
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "splice",
- .version = FIO_IOOPS_VERSION,
- .init = fio_spliceio_init,
- .queue = fio_spliceio_queue,
- .getevents = fio_spliceio_getevents,
- .event = fio_spliceio_event,
- .cleanup = fio_spliceio_cleanup,
- .flags = FIO_SYNCIO,
-};
-
-#else /* FIO_HAVE_SPLICE */
-
-/*
- * When we have a proper configure system in place, we simply wont build
- * and install this io engine. For now install a crippled version that
- * just complains and fails to load.
- */
-static int fio_spliceio_init(struct thread_data fio_unused *td)
-{
- fprintf(stderr, "fio: splice not available\n");
- return 1;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "splice",
- .version = FIO_IOOPS_VERSION,
- .init = fio_spliceio_init,
-};
-
-#endif
-
-static void fio_init fio_spliceio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_spliceio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
+++ /dev/null
-/*
- * regular read/write sync io engine
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <errno.h>
-#include <assert.h>
-
-#include "../fio.h"
-#include "../os.h"
-
-struct syncio_data {
- struct io_u *last_io_u;
-};
-
-static int fio_syncio_getevents(struct thread_data *td, int fio_unused min,
- int max, struct timespec fio_unused *t)
-{
- assert(max <= 1);
-
- /*
- * we can only have one finished io_u for sync io, since the depth
- * is always 1
- */
- if (list_empty(&td->io_u_busylist))
- return 0;
-
- return 1;
-}
-
-static struct io_u *fio_syncio_event(struct thread_data *td, int event)
-{
- struct syncio_data *sd = td->io_ops->data;
-
- assert(event == 0);
-
- return sd->last_io_u;
-}
-
-static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
-{
- struct fio_file *f = io_u->file;
-
- if (io_u->ddir == DDIR_SYNC)
- return 0;
- if (io_u->offset == f->last_completed_pos)
- return 0;
-
- if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
- td_verror(td, errno);
- return 1;
- }
-
- return 0;
-}
-
-static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
-{
- struct syncio_data *sd = td->io_ops->data;
- struct fio_file *f = io_u->file;
- unsigned int ret;
-
- if (io_u->ddir == DDIR_READ)
- ret = read(f->fd, io_u->buf, io_u->buflen);
- else if (io_u->ddir == DDIR_WRITE)
- ret = write(f->fd, io_u->buf, io_u->buflen);
- else
- ret = fsync(f->fd);
-
- if (ret != io_u->buflen) {
- if (ret > 0) {
- io_u->resid = io_u->buflen - ret;
- io_u->error = EIO;
- } else
- io_u->error = errno;
- }
-
- if (!io_u->error)
- sd->last_io_u = io_u;
-
- return io_u->error;
-}
-
-static void fio_syncio_cleanup(struct thread_data *td)
-{
- if (td->io_ops->data) {
- free(td->io_ops->data);
- td->io_ops->data = NULL;
- }
-}
-
-static int fio_syncio_init(struct thread_data *td)
-{
- struct syncio_data *sd = malloc(sizeof(*sd));
-
- sd->last_io_u = NULL;
- td->io_ops->data = sd;
- return 0;
-}
-
-static struct ioengine_ops ioengine = {
- .name = "sync",
- .version = FIO_IOOPS_VERSION,
- .init = fio_syncio_init,
- .prep = fio_syncio_prep,
- .queue = fio_syncio_queue,
- .getevents = fio_syncio_getevents,
- .event = fio_syncio_event,
- .cleanup = fio_syncio_cleanup,
- .flags = FIO_SYNCIO,
-};
-
-static void fio_init fio_syncio_register(void)
-{
- register_ioengine(&ioengine);
-}
-
-static void fio_exit fio_syncio_unregister(void)
-{
- unregister_ioengine(&ioengine);
-}
--- /dev/null
+/*
+ * native linux aio io engine
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+#ifdef FIO_HAVE_LIBAIO
+
+#define ev_to_iou(ev) (struct io_u *) ((unsigned long) (ev)->obj)
+
+struct libaio_data {
+ io_context_t aio_ctx;
+ struct io_event *aio_events;
+};
+
+static int fio_libaio_prep(struct thread_data fio_unused *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+
+ if (io_u->ddir == DDIR_READ)
+ io_prep_pread(&io_u->iocb, f->fd, io_u->buf, io_u->buflen, io_u->offset);
+ else if (io_u->ddir == DDIR_WRITE)
+ io_prep_pwrite(&io_u->iocb, f->fd, io_u->buf, io_u->buflen, io_u->offset);
+ else if (io_u->ddir == DDIR_SYNC)
+ io_prep_fsync(&io_u->iocb, f->fd);
+ else
+ return 1;
+
+ return 0;
+}
+
+static struct io_u *fio_libaio_event(struct thread_data *td, int event)
+{
+ struct libaio_data *ld = td->io_ops->data;
+
+ return ev_to_iou(ld->aio_events + event);
+}
+
+static int fio_libaio_getevents(struct thread_data *td, int min, int max,
+ struct timespec *t)
+{
+ struct libaio_data *ld = td->io_ops->data;
+ long r;
+
+ do {
+ r = io_getevents(ld->aio_ctx, min, max, ld->aio_events, t);
+ if (r >= min)
+ break;
+ else if (r == -EAGAIN) {
+ usleep(100);
+ continue;
+ } else if (r == -EINTR)
+ continue;
+ else if (r != 0)
+ break;
+ } while (1);
+
+ if (r < 0)
+ r = -r;
+
+ return (int) r;
+}
+
+static int fio_libaio_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct libaio_data *ld = td->io_ops->data;
+ struct iocb *iocb = &io_u->iocb;
+ long ret;
+
+ do {
+ ret = io_submit(ld->aio_ctx, 1, &iocb);
+ if (ret == 1)
+ return 0;
+ else if (ret == -EAGAIN || !ret)
+ usleep(100);
+ else if (ret == -EINTR)
+ continue;
+ else
+ break;
+ } while (1);
+
+ if (ret <= 0) {
+ io_u->resid = io_u->buflen;
+ io_u->error = -ret;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int fio_libaio_cancel(struct thread_data *td, struct io_u *io_u)
+{
+ struct libaio_data *ld = td->io_ops->data;
+
+ return io_cancel(ld->aio_ctx, &io_u->iocb, ld->aio_events);
+}
+
+static void fio_libaio_cleanup(struct thread_data *td)
+{
+ struct libaio_data *ld = td->io_ops->data;
+
+ if (ld) {
+ io_destroy(ld->aio_ctx);
+ if (ld->aio_events)
+ free(ld->aio_events);
+
+ free(ld);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_libaio_init(struct thread_data *td)
+{
+ struct libaio_data *ld = malloc(sizeof(*ld));
+
+ memset(ld, 0, sizeof(*ld));
+ if (io_queue_init(td->iodepth, &ld->aio_ctx)) {
+ td_verror(td, errno);
+ free(ld);
+ return 1;
+ }
+
+ ld->aio_events = malloc(td->iodepth * sizeof(struct io_event));
+ memset(ld->aio_events, 0, td->iodepth * sizeof(struct io_event));
+ td->io_ops->data = ld;
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "libaio",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_libaio_init,
+ .prep = fio_libaio_prep,
+ .queue = fio_libaio_queue,
+ .cancel = fio_libaio_cancel,
+ .getevents = fio_libaio_getevents,
+ .event = fio_libaio_event,
+ .cleanup = fio_libaio_cleanup,
+};
+
+#else /* FIO_HAVE_LIBAIO */
+
+/*
+ * When we have a proper configure system in place, we simply wont build
+ * and install this io engine. For now install a crippled version that
+ * just complains and fails to load.
+ */
+static int fio_libaio_init(struct thread_data fio_unused *td)
+{
+ fprintf(stderr, "fio: libaio not available\n");
+ return 1;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "libaio",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_libaio_init,
+};
+
+#endif
+
+static void fio_init fio_libaio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_libaio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * regular read/write sync io engine
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/mman.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+struct mmapio_data {
+ struct io_u *last_io_u;
+};
+
+static int fio_mmapio_getevents(struct thread_data *td, int fio_unused min,
+ int max, struct timespec fio_unused *t)
+{
+ assert(max <= 1);
+
+ /*
+ * we can only have one finished io_u for sync io, since the depth
+ * is always 1
+ */
+ if (list_empty(&td->io_u_busylist))
+ return 0;
+
+ return 1;
+}
+
+static struct io_u *fio_mmapio_event(struct thread_data *td, int event)
+{
+ struct mmapio_data *sd = td->io_ops->data;
+
+ assert(event == 0);
+
+ return sd->last_io_u;
+}
+
+
+static int fio_mmapio_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ unsigned long long real_off = io_u->offset - f->file_offset;
+ struct mmapio_data *sd = td->io_ops->data;
+
+ if (io_u->ddir == DDIR_READ)
+ memcpy(io_u->buf, f->mmap + real_off, io_u->buflen);
+ else if (io_u->ddir == DDIR_WRITE)
+ memcpy(f->mmap + real_off, io_u->buf, io_u->buflen);
+ else if (io_u->ddir == DDIR_SYNC) {
+ if (msync(f->mmap, f->file_size, MS_SYNC))
+ io_u->error = errno;
+ }
+
+ /*
+ * not really direct, but should drop the pages from the cache
+ */
+ if (td->odirect && io_u->ddir != DDIR_SYNC) {
+ if (msync(f->mmap + real_off, io_u->buflen, MS_SYNC) < 0)
+ io_u->error = errno;
+ if (madvise(f->mmap + real_off, io_u->buflen, MADV_DONTNEED) < 0)
+ io_u->error = errno;
+ }
+
+ if (!io_u->error)
+ sd->last_io_u = io_u;
+
+ return io_u->error;
+}
+
+static void fio_mmapio_cleanup(struct thread_data *td)
+{
+ if (td->io_ops->data) {
+ free(td->io_ops->data);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_mmapio_init(struct thread_data *td)
+{
+ struct mmapio_data *sd = malloc(sizeof(*sd));
+
+ sd->last_io_u = NULL;
+ td->io_ops->data = sd;
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "mmap",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_mmapio_init,
+ .queue = fio_mmapio_queue,
+ .getevents = fio_mmapio_getevents,
+ .event = fio_mmapio_event,
+ .cleanup = fio_mmapio_cleanup,
+ .flags = FIO_SYNCIO | FIO_MMAPIO,
+};
+
+static void fio_init fio_mmapio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_mmapio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * null engine - doesn't do any transfers. Used to test fio.
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+struct null_data {
+ struct io_u *last_io_u;
+};
+
+static int fio_null_getevents(struct thread_data *td, int fio_unused min,
+ int max, struct timespec fio_unused *t)
+{
+ assert(max <= 1);
+
+ if (list_empty(&td->io_u_busylist))
+ return 0;
+
+ return 1;
+}
+
+static struct io_u *fio_null_event(struct thread_data *td, int event)
+{
+ struct null_data *nd = td->io_ops->data;
+
+ assert(event == 0);
+
+ return nd->last_io_u;
+}
+
+static int fio_null_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct null_data *nd = td->io_ops->data;
+
+ io_u->resid = 0;
+ io_u->error = 0;
+ nd->last_io_u = io_u;
+ return 0;
+}
+
+static void fio_null_cleanup(struct thread_data *td)
+{
+ if (td->io_ops->data) {
+ free(td->io_ops->data);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_null_init(struct thread_data *td)
+{
+ struct null_data *nd = malloc(sizeof(*nd));
+
+ nd->last_io_u = NULL;
+ td->io_ops->data = nd;
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "null",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_null_init,
+ .queue = fio_null_queue,
+ .getevents = fio_null_getevents,
+ .event = fio_null_event,
+ .cleanup = fio_null_cleanup,
+ .flags = FIO_SYNCIO,
+};
+
+static void fio_init fio_null_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_null_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * posix aio io engine
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+#ifdef FIO_HAVE_POSIXAIO
+
+struct posixaio_data {
+ struct io_u **aio_events;
+};
+
+static int fill_timespec(struct timespec *ts)
+{
+#ifdef _POSIX_TIMERS
+ if (!clock_gettime(CLOCK_MONOTONIC, ts))
+ return 0;
+
+ perror("clock_gettime");
+#endif
+ return 1;
+}
+
+static unsigned long long ts_utime_since_now(struct timespec *t)
+{
+ long long sec, nsec;
+ struct timespec now;
+
+ if (fill_timespec(&now))
+ return 0;
+
+ sec = now.tv_sec - t->tv_sec;
+ nsec = now.tv_nsec - t->tv_nsec;
+ if (sec > 0 && nsec < 0) {
+ sec--;
+ nsec += 1000000000;
+ }
+
+ sec *= 1000000;
+ nsec /= 1000;
+ return sec + nsec;
+}
+
+static int fio_posixaio_cancel(struct thread_data fio_unused *td,
+ struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ int r = aio_cancel(f->fd, &io_u->aiocb);
+
+ if (r == 1 || r == AIO_CANCELED)
+ return 0;
+
+ return 1;
+}
+
+static int fio_posixaio_prep(struct thread_data fio_unused *td,
+ struct io_u *io_u)
+{
+ struct aiocb *aiocb = &io_u->aiocb;
+ struct fio_file *f = io_u->file;
+
+ aiocb->aio_fildes = f->fd;
+ aiocb->aio_buf = io_u->buf;
+ aiocb->aio_nbytes = io_u->buflen;
+ aiocb->aio_offset = io_u->offset;
+
+ io_u->seen = 0;
+ return 0;
+}
+
+static int fio_posixaio_getevents(struct thread_data *td, int min, int max,
+ struct timespec *t)
+{
+ struct posixaio_data *pd = td->io_ops->data;
+ struct list_head *entry;
+ struct timespec start;
+ int r, have_timeout = 0;
+
+ if (t && !fill_timespec(&start))
+ have_timeout = 1;
+
+ r = 0;
+restart:
+ list_for_each(entry, &td->io_u_busylist) {
+ struct io_u *io_u = list_entry(entry, struct io_u, list);
+ int err;
+
+ if (io_u->seen)
+ continue;
+
+ err = aio_error(&io_u->aiocb);
+ switch (err) {
+ default:
+ io_u->error = err;
+ case ECANCELED:
+ case 0:
+ pd->aio_events[r++] = io_u;
+ io_u->seen = 1;
+ break;
+ case EINPROGRESS:
+ break;
+ }
+
+ if (r >= max)
+ break;
+ }
+
+ if (r >= min)
+ return r;
+
+ if (have_timeout) {
+ unsigned long long usec;
+
+ usec = (t->tv_sec * 1000000) + (t->tv_nsec / 1000);
+ if (ts_utime_since_now(&start) > usec)
+ return r;
+ }
+
+ /*
+ * hrmpf, we need to wait for more. we should use aio_suspend, for
+ * now just sleep a little and recheck status of busy-and-not-seen
+ */
+ usleep(1000);
+ goto restart;
+}
+
+static struct io_u *fio_posixaio_event(struct thread_data *td, int event)
+{
+ struct posixaio_data *pd = td->io_ops->data;
+
+ return pd->aio_events[event];
+}
+
+static int fio_posixaio_queue(struct thread_data fio_unused *td,
+ struct io_u *io_u)
+{
+ struct aiocb *aiocb = &io_u->aiocb;
+ int ret;
+
+ if (io_u->ddir == DDIR_READ)
+ ret = aio_read(aiocb);
+ else if (io_u->ddir == DDIR_WRITE)
+ ret = aio_write(aiocb);
+ else
+ ret = aio_fsync(O_SYNC, aiocb);
+
+ if (ret)
+ io_u->error = errno;
+
+ return io_u->error;
+}
+
+static void fio_posixaio_cleanup(struct thread_data *td)
+{
+ struct posixaio_data *pd = td->io_ops->data;
+
+ if (pd) {
+ free(pd->aio_events);
+ free(pd);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_posixaio_init(struct thread_data *td)
+{
+ struct posixaio_data *pd = malloc(sizeof(*pd));
+
+ memset(pd, 0, sizeof(*pd));
+ pd->aio_events = malloc(td->iodepth * sizeof(struct io_u *));
+ memset(pd->aio_events, 0, td->iodepth * sizeof(struct io_u *));
+
+ td->io_ops->data = pd;
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "posixaio",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_posixaio_init,
+ .prep = fio_posixaio_prep,
+ .queue = fio_posixaio_queue,
+ .cancel = fio_posixaio_cancel,
+ .getevents = fio_posixaio_getevents,
+ .event = fio_posixaio_event,
+ .cleanup = fio_posixaio_cleanup,
+};
+
+#else /* FIO_HAVE_POSIXAIO */
+
+/*
+ * When we have a proper configure system in place, we simply wont build
+ * and install this io engine. For now install a crippled version that
+ * just complains and fails to load.
+ */
+static int fio_posixaio_init(struct thread_data fio_unused *td)
+{
+ fprintf(stderr, "fio: posixaio not available\n");
+ return 1;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "posixaio",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_posixaio_init,
+};
+
+#endif
+
+static void fio_init fio_posixaio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_posixaio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * scsi generic sg v3 io engine
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/poll.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+#ifdef FIO_HAVE_SGIO
+
+struct sgio_cmd {
+ unsigned char cdb[10];
+ int nr;
+};
+
+struct sgio_data {
+ struct sgio_cmd *cmds;
+ struct io_u **events;
+ unsigned int bs;
+};
+
+static void sgio_hdr_init(struct sgio_data *sd, struct sg_io_hdr *hdr,
+ struct io_u *io_u, int fs)
+{
+ struct sgio_cmd *sc = &sd->cmds[io_u->index];
+
+ memset(hdr, 0, sizeof(*hdr));
+ memset(sc->cdb, 0, sizeof(sc->cdb));
+
+ hdr->interface_id = 'S';
+ hdr->cmdp = sc->cdb;
+ hdr->cmd_len = sizeof(sc->cdb);
+ hdr->pack_id = io_u->index;
+ hdr->usr_ptr = io_u;
+
+ if (fs) {
+ hdr->dxferp = io_u->buf;
+ hdr->dxfer_len = io_u->buflen;
+ }
+}
+
+static int fio_sgio_ioctl_getevents(struct thread_data *td, int fio_unused min,
+ int max, struct timespec fio_unused *t)
+{
+ assert(max <= 1);
+
+ /*
+ * we can only have one finished io_u for sync io, since the depth
+ * is always 1
+ */
+ if (list_empty(&td->io_u_busylist))
+ return 0;
+
+ return 1;
+}
+
+
+static int fio_sgio_getevents(struct thread_data *td, int min, int max,
+ struct timespec fio_unused *t)
+{
+ struct fio_file *f = &td->files[0];
+ struct sgio_data *sd = td->io_ops->data;
+ struct pollfd pfd = { .fd = f->fd, .events = POLLIN };
+ void *buf = malloc(max * sizeof(struct sg_io_hdr));
+ int left = max, ret, events, i, r = 0, fl = 0;
+
+ /*
+ * don't block for !events
+ */
+ if (!min) {
+ fl = fcntl(f->fd, F_GETFL);
+ fcntl(f->fd, F_SETFL, fl | O_NONBLOCK);
+ }
+
+ while (left) {
+ do {
+ if (!min)
+ break;
+ poll(&pfd, 1, -1);
+ if (pfd.revents & POLLIN)
+ break;
+ } while (1);
+
+ ret = read(f->fd, buf, left * sizeof(struct sg_io_hdr));
+ if (ret < 0) {
+ if (errno == EAGAIN)
+ break;
+ td_verror(td, errno);
+ r = -1;
+ break;
+ } else if (!ret)
+ break;
+
+ events = ret / sizeof(struct sg_io_hdr);
+ left -= events;
+ r += events;
+
+ for (i = 0; i < events; i++) {
+ struct sg_io_hdr *hdr = (struct sg_io_hdr *) buf + i;
+
+ sd->events[r - events + i] = hdr->usr_ptr;
+ }
+ }
+
+ if (!min)
+ fcntl(f->fd, F_SETFL, fl);
+
+ free(buf);
+ return r;
+}
+
+static int fio_sgio_ioctl_doio(struct thread_data *td,
+ struct fio_file *f, struct io_u *io_u)
+{
+ struct sgio_data *sd = td->io_ops->data;
+ struct sg_io_hdr *hdr = &io_u->hdr;
+
+ sd->events[0] = io_u;
+
+ return ioctl(f->fd, SG_IO, hdr);
+}
+
+static int fio_sgio_rw_doio(struct fio_file *f, struct io_u *io_u, int sync)
+{
+ struct sg_io_hdr *hdr = &io_u->hdr;
+ int ret;
+
+ ret = write(f->fd, hdr, sizeof(*hdr));
+ if (ret < 0)
+ return errno;
+
+ if (sync) {
+ ret = read(f->fd, hdr, sizeof(*hdr));
+ if (ret < 0)
+ return errno;
+ }
+
+ return 0;
+}
+
+static int fio_sgio_doio(struct thread_data *td, struct io_u *io_u, int sync)
+{
+ struct fio_file *f = io_u->file;
+
+ if (td->filetype == FIO_TYPE_BD)
+ return fio_sgio_ioctl_doio(td, f, io_u);
+
+ return fio_sgio_rw_doio(f, io_u, sync);
+}
+
+static int fio_sgio_prep(struct thread_data *td, struct io_u *io_u)
+{
+ struct sg_io_hdr *hdr = &io_u->hdr;
+ struct sgio_data *sd = td->io_ops->data;
+ int nr_blocks, lba;
+
+ if (io_u->buflen & (sd->bs - 1)) {
+ log_err("read/write not sector aligned\n");
+ return EINVAL;
+ }
+
+ if (io_u->ddir == DDIR_READ) {
+ sgio_hdr_init(sd, hdr, io_u, 1);
+
+ hdr->dxfer_direction = SG_DXFER_FROM_DEV;
+ hdr->cmdp[0] = 0x28;
+ } else if (io_u->ddir == DDIR_WRITE) {
+ sgio_hdr_init(sd, hdr, io_u, 1);
+
+ hdr->dxfer_direction = SG_DXFER_TO_DEV;
+ hdr->cmdp[0] = 0x2a;
+ } else {
+ sgio_hdr_init(sd, hdr, io_u, 0);
+
+ hdr->dxfer_direction = SG_DXFER_NONE;
+ hdr->cmdp[0] = 0x35;
+ }
+
+ if (hdr->dxfer_direction != SG_DXFER_NONE) {
+ nr_blocks = io_u->buflen / sd->bs;
+ lba = io_u->offset / sd->bs;
+ hdr->cmdp[2] = (unsigned char) ((lba >> 24) & 0xff);
+ hdr->cmdp[3] = (unsigned char) ((lba >> 16) & 0xff);
+ hdr->cmdp[4] = (unsigned char) ((lba >> 8) & 0xff);
+ hdr->cmdp[5] = (unsigned char) (lba & 0xff);
+ hdr->cmdp[7] = (unsigned char) ((nr_blocks >> 8) & 0xff);
+ hdr->cmdp[8] = (unsigned char) (nr_blocks & 0xff);
+ }
+
+ return 0;
+}
+
+static int fio_sgio_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct sg_io_hdr *hdr = &io_u->hdr;
+ int ret;
+
+ ret = fio_sgio_doio(td, io_u, io_u->ddir == DDIR_SYNC);
+
+ if (ret < 0)
+ io_u->error = errno;
+ else if (hdr->status) {
+ io_u->resid = hdr->resid;
+ io_u->error = EIO;
+ }
+
+ return io_u->error;
+}
+
+static struct io_u *fio_sgio_event(struct thread_data *td, int event)
+{
+ struct sgio_data *sd = td->io_ops->data;
+
+ return sd->events[event];
+}
+
+static int fio_sgio_get_bs(struct thread_data *td, unsigned int *bs)
+{
+ struct sgio_data *sd = td->io_ops->data;
+ struct io_u *io_u;
+ struct sg_io_hdr *hdr;
+ unsigned char buf[8];
+ int ret;
+
+ io_u = __get_io_u(td);
+ assert(io_u);
+
+ hdr = &io_u->hdr;
+ sgio_hdr_init(sd, hdr, io_u, 0);
+ memset(buf, 0, sizeof(buf));
+
+ hdr->cmdp[0] = 0x25;
+ hdr->dxfer_direction = SG_DXFER_FROM_DEV;
+ hdr->dxferp = buf;
+ hdr->dxfer_len = sizeof(buf);
+
+ ret = fio_sgio_doio(td, io_u, 1);
+ if (ret) {
+ put_io_u(td, io_u);
+ return ret;
+ }
+
+ *bs = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
+ put_io_u(td, io_u);
+ return 0;
+}
+
+static void fio_sgio_cleanup(struct thread_data *td)
+{
+ if (td->io_ops->data) {
+ free(td->io_ops->data);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_sgio_init(struct thread_data *td)
+{
+ struct fio_file *f = &td->files[0];
+ struct sgio_data *sd;
+ unsigned int bs;
+ int ret;
+
+ sd = malloc(sizeof(*sd));
+ memset(sd, 0, sizeof(*sd));
+ sd->cmds = malloc(td->iodepth * sizeof(struct sgio_cmd));
+ memset(sd->cmds, 0, td->iodepth * sizeof(struct sgio_cmd));
+ sd->events = malloc(td->iodepth * sizeof(struct io_u *));
+ memset(sd->events, 0, td->iodepth * sizeof(struct io_u *));
+ td->io_ops->data = sd;
+
+ if (td->filetype == FIO_TYPE_BD) {
+ if (ioctl(f->fd, BLKSSZGET, &bs) < 0) {
+ td_verror(td, errno);
+ goto err;
+ }
+ } else if (td->filetype == FIO_TYPE_CHAR) {
+ int version;
+
+ if (ioctl(f->fd, SG_GET_VERSION_NUM, &version) < 0) {
+ td_verror(td, errno);
+ goto err;
+ }
+
+ ret = fio_sgio_get_bs(td, &bs);
+ if (ret)
+ goto err;
+ } else {
+ log_err("ioengine sgio only works on block devices\n");
+ goto err;
+ }
+
+ sd->bs = bs;
+
+ if (td->filetype == FIO_TYPE_BD)
+ td->io_ops->getevents = fio_sgio_ioctl_getevents;
+ else
+ td->io_ops->getevents = fio_sgio_getevents;
+
+ /*
+ * we want to do it, regardless of whether odirect is set or not
+ */
+ td->override_sync = 1;
+ return 0;
+err:
+ free(sd->events);
+ free(sd->cmds);
+ free(sd);
+ td->io_ops->data = NULL;
+ return 1;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "sg",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_sgio_init,
+ .prep = fio_sgio_prep,
+ .queue = fio_sgio_queue,
+ .getevents = fio_sgio_getevents,
+ .event = fio_sgio_event,
+ .cleanup = fio_sgio_cleanup,
+ .flags = FIO_SYNCIO | FIO_RAWIO,
+};
+
+#else /* FIO_HAVE_SGIO */
+
+/*
+ * When we have a proper configure system in place, we simply wont build
+ * and install this io engine. For now install a crippled version that
+ * just complains and fails to load.
+ */
+static int fio_sgio_init(struct thread_data fio_unused *td)
+{
+ fprintf(stderr, "fio: sgio not available\n");
+ return 1;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "sg",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_sgio_init,
+};
+
+#endif
+
+static void fio_init fio_sgio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_sgio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * Skeleton for a sample external io engine
+ *
+ * Should be compiled with:
+ *
+ * gcc -Wall -O2 -g -shared -rdynamic -fPIC -o engine.o engine.c
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+/*
+ * The core of the module is identical to the ones included with fio,
+ * read those. You cannot use register_ioengine() and unregister_ioengine()
+ * for external modules, they should be gotten through dlsym()
+ */
+
+/*
+ * The ->event() hook is called to match an event number with an io_u.
+ * After the core has called ->getevents() and it has returned eg 3,
+ * the ->event() hook must return the 3 events that have completed for
+ * subsequent calls to ->event() with [0-2]. Required.
+ */
+static struct io_u *fio_skeleton_event(struct thread_data *td, int event)
+{
+ return NULL;
+}
+
+/*
+ * The ->getevents() hook is used to reap completion events from an async
+ * io engine. It returns the number of completed events since the last call,
+ * which may then be retrieved by calling the ->event() hook with the event
+ * numbers. Required.
+ */
+static int fio_skeleton_getevents(struct thread_data *td, int min, int max,
+ struct timespec *t)
+{
+ return 0;
+}
+
+/*
+ * The ->cancel() hook attempts to cancel the io_u. Only relevant for
+ * async io engines, and need not be supported.
+ */
+static int fio_skeleton_cancel(struct thread_data *td, struct io_u *io_u)
+{
+ return 0;
+}
+
+/*
+ * The ->queue() hook is responsible for initiating io on the io_u
+ * being passed in. If the io engine is a synchronous one, io may complete
+ * before ->queue() returns. Required.
+ */
+static int fio_skeleton_queue(struct thread_data *td, struct io_u *io_u)
+{
+ return 0;
+}
+
+/*
+ * The ->prep() function is called for each io_u prior to being submitted
+ * with ->queue(). This hook allows the io engine to perform any
+ * preperatory actions on the io_u, before being submitted. Not required.
+ */
+static int fio_skeleton_prep(struct thread_data *td, struct io_u *io_u)
+{
+ return 0;
+}
+
+/*
+ * The init function is called once per thread/process, and should set up
+ * any structures that this io engine requires to keep track of io. Not
+ * required.
+ */
+static int fio_skeleton_init(struct thread_data *td)
+{
+ return 0;
+}
+
+/*
+ * This is paired with the ->init() funtion and is called when a thread is
+ * done doing io. Should tear down anything setup by the ->init() function.
+ * Not required.
+ */
+static void fio_skeleton_cleanup(struct thread_data *td)
+{
+}
+
+/*
+ * Note that the structure is exported, so that fio can get it via
+ * dlsym(..., "ioengine");
+ */
+struct ioengine_ops ioengine = {
+ .name = "engine_name",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_skeleton_init,
+ .prep = fio_skeleton_prep,
+ .queue = fio_skeleton_queue,
+ .cancel = fio_skeleton_cancel,
+ .getevents = fio_skeleton_getevents,
+ .event = fio_skeleton_event,
+ .cleanup = fio_skeleton_cleanup,
+};
--- /dev/null
+/*
+ * splice io engine
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/poll.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+#ifdef FIO_HAVE_SPLICE
+
+struct spliceio_data {
+ struct io_u *last_io_u;
+ int pipe[2];
+};
+
+static int fio_spliceio_getevents(struct thread_data *td, int fio_unused min,
+ int max, struct timespec fio_unused *t)
+{
+ assert(max <= 1);
+
+ /*
+ * we can only have one finished io_u for sync io, since the depth
+ * is always 1
+ */
+ if (list_empty(&td->io_u_busylist))
+ return 0;
+
+ return 1;
+}
+
+static struct io_u *fio_spliceio_event(struct thread_data *td, int event)
+{
+ struct spliceio_data *sd = td->io_ops->data;
+
+ assert(event == 0);
+
+ return sd->last_io_u;
+}
+
+/*
+ * For splice reading, we unfortunately cannot (yet) vmsplice the other way.
+ * So just splice the data from the file into the pipe, and use regular
+ * read to fill the buffer. Doesn't make a lot of sense, but...
+ */
+static int fio_splice_read(struct thread_data *td, struct io_u *io_u)
+{
+ struct spliceio_data *sd = td->io_ops->data;
+ struct fio_file *f = io_u->file;
+ int ret, ret2, buflen;
+ off_t offset;
+ void *p;
+
+ offset = io_u->offset;
+ buflen = io_u->buflen;
+ p = io_u->buf;
+ while (buflen) {
+ int this_len = buflen;
+
+ if (this_len > SPLICE_DEF_SIZE)
+ this_len = SPLICE_DEF_SIZE;
+
+ ret = splice(f->fd, &offset, sd->pipe[1], NULL, this_len, SPLICE_F_MORE);
+ if (ret < 0) {
+ if (errno == ENODATA || errno == EAGAIN)
+ continue;
+
+ return errno;
+ }
+
+ buflen -= ret;
+
+ while (ret) {
+ ret2 = read(sd->pipe[0], p, ret);
+ if (ret2 < 0)
+ return errno;
+
+ ret -= ret2;
+ p += ret2;
+ }
+ }
+
+ return io_u->buflen;
+}
+
+/*
+ * For splice writing, we can vmsplice our data buffer directly into a
+ * pipe and then splice that to a file.
+ */
+static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
+{
+ struct spliceio_data *sd = td->io_ops->data;
+ struct iovec iov[1] = {
+ {
+ .iov_base = io_u->buf,
+ .iov_len = io_u->buflen,
+ }
+ };
+ struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
+ struct fio_file *f = io_u->file;
+ off_t off = io_u->offset;
+ int ret, ret2;
+
+ while (iov[0].iov_len) {
+ if (poll(&pfd, 1, -1) < 0)
+ return errno;
+
+ ret = vmsplice(sd->pipe[1], iov, 1, SPLICE_F_NONBLOCK);
+ if (ret < 0)
+ return errno;
+
+ iov[0].iov_len -= ret;
+ iov[0].iov_base += ret;
+
+ while (ret) {
+ ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
+ if (ret2 < 0)
+ return errno;
+
+ ret -= ret2;
+ }
+ }
+
+ return io_u->buflen;
+}
+
+static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct spliceio_data *sd = td->io_ops->data;
+ unsigned int ret;
+
+ if (io_u->ddir == DDIR_READ)
+ ret = fio_splice_read(td, io_u);
+ else if (io_u->ddir == DDIR_WRITE)
+ ret = fio_splice_write(td, io_u);
+ else
+ ret = fsync(io_u->file->fd);
+
+ if (ret != io_u->buflen) {
+ if (ret > 0) {
+ io_u->resid = io_u->buflen - ret;
+ io_u->error = ENODATA;
+ } else
+ io_u->error = errno;
+ }
+
+ if (!io_u->error)
+ sd->last_io_u = io_u;
+
+ return io_u->error;
+}
+
+static void fio_spliceio_cleanup(struct thread_data *td)
+{
+ struct spliceio_data *sd = td->io_ops->data;
+
+ if (sd) {
+ close(sd->pipe[0]);
+ close(sd->pipe[1]);
+ free(sd);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_spliceio_init(struct thread_data *td)
+{
+ struct spliceio_data *sd = malloc(sizeof(*sd));
+
+ sd->last_io_u = NULL;
+ if (pipe(sd->pipe) < 0) {
+ td_verror(td, errno);
+ free(sd);
+ return 1;
+ }
+
+ td->io_ops->data = sd;
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "splice",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_spliceio_init,
+ .queue = fio_spliceio_queue,
+ .getevents = fio_spliceio_getevents,
+ .event = fio_spliceio_event,
+ .cleanup = fio_spliceio_cleanup,
+ .flags = FIO_SYNCIO,
+};
+
+#else /* FIO_HAVE_SPLICE */
+
+/*
+ * When we have a proper configure system in place, we simply wont build
+ * and install this io engine. For now install a crippled version that
+ * just complains and fails to load.
+ */
+static int fio_spliceio_init(struct thread_data fio_unused *td)
+{
+ fprintf(stderr, "fio: splice not available\n");
+ return 1;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "splice",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_spliceio_init,
+};
+
+#endif
+
+static void fio_init fio_spliceio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_spliceio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}
--- /dev/null
+/*
+ * regular read/write sync io engine
+ *
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "../fio.h"
+#include "../os.h"
+
+struct syncio_data {
+ struct io_u *last_io_u;
+};
+
+static int fio_syncio_getevents(struct thread_data *td, int fio_unused min,
+ int max, struct timespec fio_unused *t)
+{
+ assert(max <= 1);
+
+ /*
+ * we can only have one finished io_u for sync io, since the depth
+ * is always 1
+ */
+ if (list_empty(&td->io_u_busylist))
+ return 0;
+
+ return 1;
+}
+
+static struct io_u *fio_syncio_event(struct thread_data *td, int event)
+{
+ struct syncio_data *sd = td->io_ops->data;
+
+ assert(event == 0);
+
+ return sd->last_io_u;
+}
+
+static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+
+ if (io_u->ddir == DDIR_SYNC)
+ return 0;
+ if (io_u->offset == f->last_completed_pos)
+ return 0;
+
+ if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
+ td_verror(td, errno);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
+{
+ struct syncio_data *sd = td->io_ops->data;
+ struct fio_file *f = io_u->file;
+ unsigned int ret;
+
+ if (io_u->ddir == DDIR_READ)
+ ret = read(f->fd, io_u->buf, io_u->buflen);
+ else if (io_u->ddir == DDIR_WRITE)
+ ret = write(f->fd, io_u->buf, io_u->buflen);
+ else
+ ret = fsync(f->fd);
+
+ if (ret != io_u->buflen) {
+ if (ret > 0) {
+ io_u->resid = io_u->buflen - ret;
+ io_u->error = EIO;
+ } else
+ io_u->error = errno;
+ }
+
+ if (!io_u->error)
+ sd->last_io_u = io_u;
+
+ return io_u->error;
+}
+
+static void fio_syncio_cleanup(struct thread_data *td)
+{
+ if (td->io_ops->data) {
+ free(td->io_ops->data);
+ td->io_ops->data = NULL;
+ }
+}
+
+static int fio_syncio_init(struct thread_data *td)
+{
+ struct syncio_data *sd = malloc(sizeof(*sd));
+
+ sd->last_io_u = NULL;
+ td->io_ops->data = sd;
+ return 0;
+}
+
+static struct ioengine_ops ioengine = {
+ .name = "sync",
+ .version = FIO_IOOPS_VERSION,
+ .init = fio_syncio_init,
+ .prep = fio_syncio_prep,
+ .queue = fio_syncio_queue,
+ .getevents = fio_syncio_getevents,
+ .event = fio_syncio_event,
+ .cleanup = fio_syncio_cleanup,
+ .flags = FIO_SYNCIO,
+};
+
+static void fio_init fio_syncio_register(void)
+{
+ register_ioengine(&ioengine);
+}
+
+static void fio_exit fio_syncio_unregister(void)
+{
+ unregister_ioengine(&ioengine);
+}