#ifdef ARCH_HAVE_IOURING
#include "../lib/types.h"
-#include "../os/io_uring.h"
+#include "../os/linux/io_uring.h"
struct io_sq_ring {
unsigned *head;
struct ioring_data {
int ring_fd;
- struct io_u **io_us;
struct io_u **io_u_index;
struct io_sq_ring sq_ring;
void *pad;
unsigned int hipri;
unsigned int fixedbufs;
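+ /* Submit via a kernel SQ thread (IORING_SETUP_SQPOLL) when set */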
+ unsigned int sqpoll_thread;
unsigned int sqpoll_set;
unsigned int sqpoll_cpu;
};
},
{
.name = "sqthread_poll",
- .lname = "Kernel SQ thread should poll",
+ .lname = "Kernel SQ thread polling",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, sqpoll_thread),
+ .help = "Offload submission/completion to kernel thread",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_LIBAIO,
+ },
+ {
+ .name = "sqthread_poll_cpu",
+ .lname = "SQ Thread Poll CPU",
.type = FIO_OPT_INT,
.cb = fio_ioring_sqpoll_cb,
- .help = "Offload submission to kernel thread",
+ .help = "What CPU to run SQ thread polling on",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_LIBAIO,
},
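+/*
+ * Illustrative job file settings for these options (example values,
+ * not part of this patch):
+ *
+ *	ioengine=io_uring
+ *	sqthread_poll=1
+ *	sqthread_poll_cpu=2
+ */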
sqe->buf_index = 0;
if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
- if (io_u->ddir == DDIR_READ)
- sqe->opcode = IORING_OP_READV;
- else
- sqe->opcode = IORING_OP_WRITEV;
-
if (o->fixedbufs) {
- sqe->flags |= IOSQE_FIXED_BUFFER;
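+ /*
+ * Fixed buffers are now selected with dedicated opcodes rather than
+ * an sqe flag; buf_index names the registered buffer.
+ */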
+ if (io_u->ddir == DDIR_READ)
+ sqe->opcode = IORING_OP_READ_FIXED;
+ else
+ sqe->opcode = IORING_OP_WRITE_FIXED;
- sqe->addr = io_u->xfer_buf;
+ sqe->addr = (unsigned long) io_u->xfer_buf;
sqe->len = io_u->xfer_buflen;
sqe->buf_index = io_u->index;
} else {
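+ /* Non-fixed path: hand the kernel a single iovec per io_u */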
+ if (io_u->ddir == DDIR_READ)
+ sqe->opcode = IORING_OP_READV;
+ else
+ sqe->opcode = IORING_OP_WRITEV;
- sqe->addr = &ld->iovecs[io_u->index];
+ sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
sqe->len = 1;
}
sqe->off = io_u->offset;
- } else if (ddir_sync(io_u->ddir))
+ } else if (ddir_sync(io_u->ddir)) {
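+ /*
+ * fsync and fdatasync share IORING_OP_FSYNC; datasync behaviour is
+ * requested via the IORING_FSYNC_DATASYNC flag.
+ */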
+ sqe->fsync_flags = 0;
+ if (io_u->ddir == DDIR_DATASYNC)
+ sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
sqe->opcode = IORING_OP_FSYNC;
+ }
- sqe->data = (unsigned long) io_u;
+ sqe->user_data = (unsigned long) io_u;
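+ /* user_data comes back untouched in the CQE, identifying this io_u */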
return 0;
}
index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
cqe = &ld->cq_ring.cqes[index];
- io_u = (struct io_u *) cqe->data;
+ io_u = (struct io_u *) (uintptr_t) cqe->user_data;
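+ /* cqe->res is the operation result: bytes transferred, or -errno */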
if (cqe->res != io_u->xfer_buflen) {
if (cqe->res > io_u->xfer_buflen)
continue;
}
- if (!o->sqpoll_set) {
+ if (!o->sqpoll_thread) {
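+ /*
+ * No SQPOLL thread, so we have to enter the kernel ourselves to
+ * submit and reap events.
+ */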
r = io_uring_enter(ld, 0, actual_min,
IORING_ENTER_GETEVENTS);
if (r < 0) {
if (!ld->queued)
return 0;
- /* Nothing to do */
- if (o->sqpoll_set) {
+ /*
+ * Kernel side does submission. Just need to check if the ring is
+ * flagged as needing a kick; if so, call io_uring_enter(). This
+ * only happens if we've been idle too long.
+ */
+ if (o->sqpoll_thread) {
struct io_sq_ring *ring = &ld->sq_ring;
read_barrier();
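+ /*
+ * After the barrier, the ring flags can be tested for
+ * IORING_SQ_NEED_WAKEUP; if set, the SQ thread has gone idle and
+ * io_uring_enter() must be called with IORING_ENTER_SQ_WAKEUP.
+ */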
fio_ioring_unmap(ld);
free(ld->io_u_index);
- free(ld->io_us);
free(ld->iovecs);
free(ld);
}
if (o->hipri)
p.flags |= IORING_SETUP_IOPOLL;
- if (o->sqpoll_set) {
- p.flags |= IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
- p.sq_thread_cpu = o->sqpoll_cpu;
+ if (o->sqpoll_thread) {
+ p.flags |= IORING_SETUP_SQPOLL;
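+ /* Pin the SQ thread only if the user asked for a specific CPU */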
+ if (o->sqpoll_set) {
+ p.flags |= IORING_SETUP_SQ_AFF;
+ p.sq_thread_cpu = o->sqpoll_cpu;
+ }
}
if (o->fixedbufs) {
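+ /*
+ * Fixed buffers must be registered up front (IORING_REGISTER_BUFFERS)
+ * so READ_FIXED/WRITE_FIXED can reference them by buf_index.
+ */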
/* io_u index */
ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
- ld->io_us = calloc(td->o.iodepth, sizeof(struct io_u *));
ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
td->io_ops_data = ld;