#ifdef ARCH_HAVE_IOURING
#include "../lib/types.h"
-#include "../os/io_uring.h"
+#include "../os/linux/io_uring.h"
struct io_sq_ring {
unsigned *head;
struct ioring_data {
int ring_fd;
- struct io_u **io_us;
struct io_u **io_u_index;
+ int *fds;
+
struct io_sq_ring sq_ring;
struct io_uring_sqe *sqes;
struct iovec *iovecs;
int cq_ring_off;
unsigned iodepth;
- uint64_t cachehit;
- uint64_t cachemiss;
-
struct ioring_mmap mmap[3];
};
void *pad;
unsigned int hipri;
unsigned int fixedbufs;
- unsigned int sqthread;
- unsigned int sqthread_set;
- unsigned int sqthread_poll;
- unsigned int sqwq;
+ unsigned int registerfiles;
+ unsigned int sqpoll_thread;
+ unsigned int sqpoll_set;
+ unsigned int sqpoll_cpu;
+ unsigned int nonvectored;
+ unsigned int uncached;
};
+
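+/* indexed by data direction, then by whether non-vectored opcodes are in use */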
+static const int ddir_to_op[2][2] = {
+ { IORING_OP_READV, IORING_OP_READ },
+ { IORING_OP_WRITEV, IORING_OP_WRITE }
+};
+
+/* fixed buffers must use the dedicated fixed-buffer opcodes */
+static const int fixed_ddir_to_op[2] = {
+ IORING_OP_READ_FIXED,
+ IORING_OP_WRITE_FIXED
+};
-static int fio_ioring_sqthread_cb(void *data, unsigned long long *val)
+static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
struct ioring_options *o = data;
- o->sqthread = *val;
- o->sqthread_set = 1;
+ o->sqpoll_cpu = *val;
+ o->sqpoll_set = 1;
return 0;
}
.off1 = offsetof(struct ioring_options, hipri),
.help = "Use polled IO completions",
.category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_LIBAIO,
+ .group = FIO_OPT_G_IOURING,
},
{
.name = "fixedbufs",
.off1 = offsetof(struct ioring_options, fixedbufs),
.help = "Pre map IO buffers",
.category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_LIBAIO,
+ .group = FIO_OPT_G_IOURING,
},
{
- .name = "sqthread",
- .lname = "Use kernel SQ thread on this CPU",
- .type = FIO_OPT_INT,
- .cb = fio_ioring_sqthread_cb,
- .help = "Offload submission to kernel thread",
+ .name = "registerfiles",
+ .lname = "Register file set",
+ .type = FIO_OPT_STR_SET,
+ .off1 = offsetof(struct ioring_options, registerfiles),
+ .help = "Pre-open/register files",
.category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_LIBAIO,
+ .group = FIO_OPT_G_IOURING,
},
{
.name = "sqthread_poll",
- .lname = "Kernel SQ thread should poll",
- .type = FIO_OPT_STR_SET,
- .off1 = offsetof(struct ioring_options, sqthread_poll),
- .help = "Used with sqthread, enables kernel side polling",
+ .lname = "Kernel SQ thread polling",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, sqpoll_thread),
+ .help = "Offload submission/completion to kernel thread",
.category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_LIBAIO,
+ .group = FIO_OPT_G_IOURING,
},
{
- .name = "sqwq",
- .lname = "Offload submission to kernel workqueue",
- .type = FIO_OPT_STR_SET,
- .off1 = offsetof(struct ioring_options, sqwq),
- .help = "Offload submission to kernel workqueue",
+ .name = "sqthread_poll_cpu",
+ .lname = "SQ Thread Poll CPU",
+ .type = FIO_OPT_INT,
+ .cb = fio_ioring_sqpoll_cb,
+ .help = "What CPU to run SQ thread polling on",
.category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_LIBAIO,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "nonvectored",
+ .lname = "Non-vectored",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, nonvectored),
+ .help = "Use non-vectored read/write commands",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "uncached",
+ .lname = "Uncached",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, uncached),
+ .help = "Use RWF_UNCACHED for buffered read/writes",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
},
{
.name = NULL,
unsigned int min_complete, unsigned int flags)
{
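+ /* the extended enter syscall also takes a sigset pointer and its size; we pass none */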
return syscall(__NR_sys_io_uring_enter, ld->ring_fd, to_submit,
- min_complete, flags);
+ min_complete, flags, NULL, 0);
}
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
struct io_uring_sqe *sqe;
sqe = &ld->sqes[io_u->index];
- sqe->fd = f->fd;
- sqe->flags = 0;
- sqe->ioprio = 0;
+
+ /* zero out fields not used in this submission */
+ memset(sqe, 0, sizeof(*sqe));
+
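+ /* registered files are addressed by their index in the fixed file table */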
+ if (o->registerfiles) {
+ sqe->fd = f->engine_pos;
+ sqe->flags = IOSQE_FIXED_FILE;
+ } else {
+ sqe->fd = f->fd;
+ }
if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
if (o->fixedbufs) {
- if (io_u->ddir == DDIR_READ)
- sqe->opcode = IORING_OP_READ_FIXED;
- else
- sqe->opcode = IORING_OP_WRITE_FIXED;
- sqe->addr = io_u->xfer_buf;
+ sqe->opcode = fixed_ddir_to_op[io_u->ddir];
+ sqe->addr = (unsigned long) io_u->xfer_buf;
sqe->len = io_u->xfer_buflen;
- sqe->index = io_u->index;
+ sqe->buf_index = io_u->index;
} else {
- if (io_u->ddir == DDIR_READ)
- sqe->opcode = IORING_OP_READV;
- else
- sqe->opcode = IORING_OP_WRITEV;
- sqe->addr = &ld->iovecs[io_u->index];
- sqe->len = 1;
+ sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
+ if (o->nonvectored) {
+ sqe->addr = (unsigned long)
+ ld->iovecs[io_u->index].iov_base;
+ sqe->len = ld->iovecs[io_u->index].iov_len;
+ } else {
+ sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
+ sqe->len = 1;
+ }
}
+ if (!td->o.odirect && o->uncached)
+ sqe->rw_flags = RWF_UNCACHED;
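+ /* the priority class sits in the top three bits of sqe->ioprio (shift 13) */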
+ if (fio_option_is_set(&td->o, ioprio_class))
+ sqe->ioprio = td->o.ioprio_class << 13;
+ if (fio_option_is_set(&td->o, ioprio))
+ sqe->ioprio |= td->o.ioprio;
sqe->off = io_u->offset;
- } else if (ddir_sync(io_u->ddir))
- sqe->opcode = IORING_OP_FSYNC;
+ } else if (ddir_sync(io_u->ddir)) {
+ if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
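+ /* only sync the byte range this file has seen writes to */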
+ sqe->off = f->first_write;
+ sqe->len = f->last_write - f->first_write;
+ sqe->sync_range_flags = td->o.sync_file_range;
+ sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
+ } else {
+ if (io_u->ddir == DDIR_DATASYNC)
+ sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
+ sqe->opcode = IORING_OP_FSYNC;
+ }
+ }
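+ /* the kernel echoes user_data back in the cqe, letting us recover the io_u */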
- sqe->data = (unsigned long) io_u;
+ sqe->user_data = (unsigned long) io_u;
return 0;
}
index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
cqe = &ld->cq_ring.cqes[index];
- io_u = (struct io_u *) cqe->data;
+ io_u = (struct io_u *) (uintptr_t) cqe->user_data;
if (cqe->res != io_u->xfer_buflen) {
if (cqe->res > io_u->xfer_buflen)
} else
io_u->error = 0;
- if (io_u->ddir == DDIR_READ) {
- if (cqe->flags & IOCQE_FLAG_CACHEHIT)
- ld->cachehit++;
- else
- ld->cachemiss++;
- }
-
return io_u;
}
r = fio_ioring_cqring_reap(td, events, max);
if (r) {
events += r;
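+ /* credit already-reaped events against the minimum we still have to wait for */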
+ if (actual_min != 0)
+ actual_min -= r;
continue;
}
- if (!o->sqthread_poll) {
+ if (!o->sqpoll_thread) {
r = io_uring_enter(ld, 0, actual_min,
IORING_ENTER_GETEVENTS);
if (r < 0) {
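+ /* a signal can interrupt the wait; retry instead of failing the job */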
- if (errno == EAGAIN)
+ if (errno == EAGAIN || errno == EINTR)
continue;
td_verror(td, errno, "io_uring_enter");
break;
if (next_tail == *ring->head)
return FIO_Q_BUSY;
ring->array[tail & ld->sq_ring_mask] = io_u->index;
+ /* ensure sqe stores are ordered with tail update */
+ write_barrier();
*ring->tail = next_tail;
write_barrier();
if (!ld->queued)
return 0;
- /* Nothing to do */
- if (o->sqthread_poll) {
+ /*
+ * Kernel side does submission. Just need to check if the ring is
+ * flagged as needing a kick, if so, call io_uring_enter(). This
+ * only happens if we've been idle too long.
+ */
+ if (o->sqpoll_thread) {
struct io_sq_ring *ring = &ld->sq_ring;
+ read_barrier();
if (*ring->flags & IORING_SQ_NEED_WAKEUP)
- io_uring_enter(ld, ld->queued, 0, 0);
+ io_uring_enter(ld, ld->queued, 0,
+ IORING_ENTER_SQ_WAKEUP);
ld->queued = 0;
return 0;
}
io_u_mark_submit(td, ret);
continue;
} else {
- if (errno == EAGAIN) {
+ if (errno == EAGAIN || errno == EINTR) {
ret = fio_ioring_cqring_reap(td, 0, ld->queued);
if (ret)
continue;
struct ioring_data *ld = td->io_ops_data;
if (ld) {
- td->ts.cachehit += ld->cachehit;
- td->ts.cachemiss += ld->cachemiss;
-
if (!(td->flags & TD_F_CHILD))
fio_ioring_unmap(ld);
free(ld->io_u_index);
- free(ld->io_us);
free(ld->iovecs);
+ free(ld->fds);
free(ld);
}
}
struct ioring_data *ld = td->io_ops_data;
struct ioring_options *o = td->eo;
int depth = td->o.iodepth;
- struct iovec *vecs = NULL;
struct io_uring_params p;
int ret;
if (o->hipri)
p.flags |= IORING_SETUP_IOPOLL;
- if (o->sqthread_set) {
- p.sq_thread_cpu = o->sqthread;
- p.flags |= IORING_SETUP_SQTHREAD;
- if (o->sqthread_poll)
- p.flags |= IORING_SETUP_SQPOLL;
+ if (o->sqpoll_thread) {
+ p.flags |= IORING_SETUP_SQPOLL;
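+ /* pin the kernel SQ thread to the requested CPU */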
+ if (o->sqpoll_set) {
+ p.flags |= IORING_SETUP_SQ_AFF;
+ p.sq_thread_cpu = o->sqpoll_cpu;
+ }
}
- if (o->sqwq)
- p.flags |= IORING_SETUP_SQWQ;
+
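+ /* create the ring first; buffers and files are registered against its fd below */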
+ ret = syscall(__NR_sys_io_uring_setup, depth, &p);
+ if (ret < 0)
+ return ret;
+
+ ld->ring_fd = ret;
if (o->fixedbufs) {
struct rlimit rlim = {
.rlim_cur = RLIM_INFINITY,
.rlim_max = RLIM_INFINITY,
};
- setrlimit(RLIMIT_MEMLOCK, &rlim);
- vecs = ld->iovecs;
- }
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
+ return -1;
- ret = syscall(__NR_sys_io_uring_setup, depth, vecs, &p);
- if (ret < 0)
- return ret;
+ ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
+ IORING_REGISTER_BUFFERS, ld->iovecs, depth);
+ if (ret < 0)
+ return ret;
+ }
- ld->ring_fd = ret;
return fio_ioring_mmap(ld, &p);
}
+static int fio_ioring_register_files(struct thread_data *td)
+{
+ struct ioring_data *ld = td->io_ops_data;
+ struct fio_file *f;
+ unsigned int i;
+ int ret;
+
+ ld->fds = calloc(td->o.nr_files, sizeof(int));
+
+ for_each_file(td, f, i) {
+ ret = generic_open_file(td, f);
+ if (ret)
+ goto err;
+ ld->fds[i] = f->fd;
+ f->engine_pos = i;
+ }
+
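+ /* hand the whole fd table to the kernel in a single registration call */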
+ ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
+ IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
+ if (ret) {
+err:
+ free(ld->fds);
+ ld->fds = NULL;
+ }
+
+ /*
+ * Pretend the file is closed again, and really close it if we hit
+ * an error.
+ */
+ for_each_file(td, f, i) {
+ if (ret) {
+ int fio_unused ret2;
+ ret2 = generic_close_file(td, f);
+ } else
+ f->fd = -1;
+ }
+
+ return ret;
+}
+
static int fio_ioring_post_init(struct thread_data *td)
{
struct ioring_data *ld = td->io_ops_data;
+ struct ioring_options *o = td->eo;
struct io_u *io_u;
int err, i;
return 1;
}
+ if (o->registerfiles) {
+ err = fio_ioring_register_files(td);
+ if (err) {
+ td_verror(td, errno, "ioring_register_files");
+ return 1;
+ }
+ }
+
return 0;
}
static int fio_ioring_init(struct thread_data *td)
{
+ struct ioring_options *o = td->eo;
struct ioring_data *ld;
+ /* sqthread submission requires registered files */
+ if (o->sqpoll_thread)
+ o->registerfiles = 1;
+
+ if (o->registerfiles && td->o.nr_files != td->o.open_files) {
+ log_err("fio: io_uring registered files require nr_files to "
+ "be identical to open_files\n");
+ return 1;
+ }
+
ld = calloc(1, sizeof(*ld));
/* ring depth must be a power-of-2 */
/* io_u index */
ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
- ld->io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
td->io_ops_data = ld;
return 0;
}
+static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
+{
+ struct ioring_data *ld = td->io_ops_data;
+ struct ioring_options *o = td->eo;
+
+ if (!ld || !o->registerfiles)
+ return generic_open_file(td, f);
+
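+ /* file was opened and registered at init time; just restore the stashed fd */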
+ f->fd = ld->fds[f->engine_pos];
+ return 0;
+}
+
+static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
+{
+ struct ioring_data *ld = td->io_ops_data;
+ struct ioring_options *o = td->eo;
+
+ if (!ld || !o->registerfiles)
+ return generic_close_file(td, f);
+
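+ /* registered files stay open until the ring is torn down; just forget the fd */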
+ f->fd = -1;
+ return 0;
+}
+
static struct ioengine_ops ioengine = {
.name = "io_uring",
.version = FIO_IOOPS_VERSION,
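+ /* FIO_ASYNCIO_SYNC_TRIM: trims are completed synchronously in this async engine */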
- .flags = FIO_ASYNCIO | FIO_NOEXTEND,
+ .flags = FIO_ASYNCIO | FIO_NOEXTEND | FIO_ASYNCIO_SYNC_TRIM,
.init = fio_ioring_init,
.post_init = fio_ioring_post_init,
.io_u_init = fio_ioring_io_u_init,
.getevents = fio_ioring_getevents,
.event = fio_ioring_event,
.cleanup = fio_ioring_cleanup,
- .open_file = generic_open_file,
- .close_file = generic_close_file,
+ .open_file = fio_ioring_open_file,
+ .close_file = fio_ioring_close_file,
.get_file_size = generic_get_file_size,
.options = options,
.option_struct_size = sizeof(struct ioring_options),