#ifdef ARCH_HAVE_IOURING
#include "../lib/types.h"
-#include "../os/io_uring.h"
+#include "../os/linux/io_uring.h"
struct io_sq_ring {
unsigned *head;
struct ioring_data {
int ring_fd;
- struct io_u **io_us;
struct io_u **io_u_index;
struct io_sq_ring sq_ring;
void *pad;
unsigned int hipri;
unsigned int fixedbufs;
- unsigned int sqthread;
- unsigned int sqthread_set;
- unsigned int sqthread_poll;
- unsigned int sqwq;
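+ /* SQPOLL: offload submission to a kernel polling thread */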
+ unsigned int sqpoll_thread;
+ unsigned int sqpoll_set;
+ unsigned int sqpoll_cpu;
};
-static int fio_ioring_sqthread_cb(void *data, unsigned long long *val)
+static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
struct ioring_options *o = data;
- o->sqthread = *val;
- o->sqthread_set = 1;
+ o->sqpoll_cpu = *val;
+ o->sqpoll_set = 1;
return 0;
}
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_LIBAIO,
},
- {
- .name = "sqthread",
- .lname = "Use kernel SQ thread on this CPU",
- .type = FIO_OPT_INT,
- .cb = fio_ioring_sqthread_cb,
- .help = "Offload submission to kernel thread",
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_LIBAIO,
- },
{
.name = "sqthread_poll",
- .lname = "Kernel SQ thread should poll",
- .type = FIO_OPT_STR_SET,
- .off1 = offsetof(struct ioring_options, sqthread_poll),
- .help = "Used with sqthread, enables kernel side polling",
+ .lname = "Kernel SQ thread polling",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, sqpoll_thread),
+ .help = "Offload submission/completion to kernel thread",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_LIBAIO,
},
{
- .name = "sqwq",
- .lname = "Offload submission to kernel workqueue",
- .type = FIO_OPT_STR_SET,
- .off1 = offsetof(struct ioring_options, sqwq),
- .help = "Offload submission to kernel workqueue",
+ .name = "sqthread_poll_cpu",
+ .lname = "SQ Thread Poll CPU",
+ .type = FIO_OPT_INT,
+ .cb = fio_ioring_sqpoll_cb,
+ .help = "What CPU to run SQ thread polling on",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_LIBAIO,
},
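+ /* e.g. fio --ioengine=io_uring --sqthread_poll=1 --sqthread_poll_cpu=3 */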
unsigned int min_complete, unsigned int flags)
{
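+ /* the last two arguments are the signal mask and its size; unused here */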
return syscall(__NR_sys_io_uring_enter, ld->ring_fd, to_submit,
- min_complete, flags);
+ min_complete, flags, NULL, 0);
}
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
sqe->fd = f->fd;
sqe->flags = 0;
sqe->ioprio = 0;
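+ /* buf_index is only used for fixed buffers; clear it by default */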
+ sqe->buf_index = 0;
if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
if (o->fixedbufs) {
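+ /* fixed buffers are pre-mapped in the kernel; buf_index picks one */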
if (io_u->ddir == DDIR_READ)
sqe->opcode = IORING_OP_READ_FIXED;
else
sqe->opcode = IORING_OP_WRITE_FIXED;
- sqe->addr = io_u->xfer_buf;
+ sqe->addr = (unsigned long) io_u->xfer_buf;
sqe->len = io_u->xfer_buflen;
- sqe->index = io_u->index;
+ sqe->buf_index = io_u->index;
} else {
if (io_u->ddir == DDIR_READ)
sqe->opcode = IORING_OP_READV;
else
sqe->opcode = IORING_OP_WRITEV;
- sqe->addr = &ld->iovecs[io_u->index];
+ sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
sqe->len = 1;
}
sqe->off = io_u->offset;
- } else if (ddir_sync(io_u->ddir))
+ } else if (ddir_sync(io_u->ddir)) {
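+ /* both sync flavors use IORING_OP_FSYNC; datasync just adds a flag */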
+ sqe->fsync_flags = 0;
+ if (io_u->ddir == DDIR_DATASYNC)
+ sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
sqe->opcode = IORING_OP_FSYNC;
+ }
- sqe->data = (unsigned long) io_u;
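+ /* the kernel echoes user_data back in the completion; stash the io_u */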
+ sqe->user_data = (unsigned long) io_u;
return 0;
}
index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
cqe = &ld->cq_ring.cqes[index];
- io_u = (struct io_u *) cqe->data;
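+ /* recover the io_u pointer stored in user_data at prep time */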
+ io_u = (struct io_u *) (uintptr_t) cqe->user_data;
if (cqe->res != io_u->xfer_buflen) {
if (cqe->res > io_u->xfer_buflen)
continue;
}
- if (!o->sqthread_poll) {
+ if (!o->sqpoll_thread) {
r = io_uring_enter(ld, 0, actual_min,
IORING_ENTER_GETEVENTS);
if (r < 0) {
if (next_tail == *ring->head)
return FIO_Q_BUSY;
+ /* ensure sqe stores are ordered with tail update */
+ write_barrier();
ring->array[tail & ld->sq_ring_mask] = io_u->index;
*ring->tail = next_tail;
write_barrier();
if (!ld->queued)
return 0;
- /* Nothing to do */
- if (o->sqthread_poll) {
+ /*
+ * The kernel side does the submission. We just need to check if the
+ * ring is flagged as needing a kick and, if so, call io_uring_enter().
+ * This only happens if we've been idle too long.
+ */
+ if (o->sqpoll_thread) {
struct io_sq_ring *ring = &ld->sq_ring;
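+ /* flags is updated by the kernel; don't read a stale value */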
+ read_barrier();
if (*ring->flags & IORING_SQ_NEED_WAKEUP)
- io_uring_enter(ld, ld->queued, 0, 0);
+ io_uring_enter(ld, ld->queued, 0,
+ IORING_ENTER_SQ_WAKEUP);
ld->queued = 0;
return 0;
}
fio_ioring_unmap(ld);
free(ld->io_u_index);
- free(ld->io_us);
free(ld->iovecs);
free(ld);
}
struct ioring_data *ld = td->io_ops_data;
struct ioring_options *o = td->eo;
int depth = td->o.iodepth;
- struct iovec *vecs = NULL;
struct io_uring_params p;
int ret;
if (o->hipri)
p.flags |= IORING_SETUP_IOPOLL;
- if (o->sqthread_set) {
- p.sq_thread_cpu = o->sqthread;
- p.flags |= IORING_SETUP_SQTHREAD;
- if (o->sqthread_poll)
- p.flags |= IORING_SETUP_SQPOLL;
+ if (o->sqpoll_thread) {
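+ /* a kernel thread polls the SQ ring for new submissions */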
+ p.flags |= IORING_SETUP_SQPOLL;
+ if (o->sqpoll_set) {
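+ /* pin the SQ poll thread to the requested CPU */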
+ p.flags |= IORING_SETUP_SQ_AFF;
+ p.sq_thread_cpu = o->sqpoll_cpu;
+ }
}
- if (o->sqwq)
- p.flags |= IORING_SETUP_SQWQ;
+
+ ret = syscall(__NR_sys_io_uring_setup, depth, &p);
+ if (ret < 0)
+ return ret;
+
+ ld->ring_fd = ret;
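+ /* buffer registration needs the ring fd returned by setup */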
if (o->fixedbufs) {
struct rlimit rlim = {
.rlim_cur = RLIM_INFINITY,
.rlim_max = RLIM_INFINITY,
};
- setrlimit(RLIMIT_MEMLOCK, &rlim);
- vecs = ld->iovecs;
- }
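+ /* registered buffers count as locked memory; raise RLIMIT_MEMLOCK */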
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
+ return -1;
- ret = syscall(__NR_sys_io_uring_setup, depth, vecs, depth, &p);
- if (ret < 0)
- return ret;
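+ /* register the iovec array; sqe->buf_index indexes these slots */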
+ ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
+ IORING_REGISTER_BUFFERS, ld->iovecs, depth);
+ if (ret < 0)
+ return ret;
+ }
- ld->ring_fd = ret;
return fio_ioring_mmap(ld, &p);
}
/* io_u index */
ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
- ld->io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
td->io_ops_data = ld;