#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
+#include "../lib/roundup.h"
#ifdef ARCH_HAVE_IOURING
int queued;
int cq_ring_off;
unsigned iodepth;
+ bool ioprio_class_set;
+ bool ioprio_set;
+ int prepped;
struct ioring_mmap mmap[3];
};
struct ioring_options {
void *pad;
unsigned int hipri;
+ unsigned int cmdprio_percentage;
unsigned int fixedbufs;
unsigned int registerfiles;
unsigned int sqpoll_thread;
unsigned int sqpoll_set;
unsigned int sqpoll_cpu;
+ unsigned int nonvectored;
+ unsigned int uncached;
+ unsigned int nowait;
+ unsigned int force_async;
+};
+
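+/*
+ * Opcode lookup table: index by [ddir][!!nonvectored] so prep can pick
+ * READV/WRITEV or READ/WRITE without branching.
+ */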
+static const int ddir_to_op[2][2] = {
+ { IORING_OP_READV, IORING_OP_READ },
+ { IORING_OP_WRITEV, IORING_OP_WRITE }
+};
+
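+/* Fixed-buffer variants, used when the fixedbufs option is enabled */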
+static const int fixed_ddir_to_op[2] = {
+ IORING_OP_READ_FIXED,
+ IORING_OP_WRITE_FIXED
};
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
+#ifdef FIO_HAVE_IOPRIO_CLASS
+ {
+ .name = "cmdprio_percentage",
+ .lname = "High priority percentage",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, cmdprio_percentage),
+ .minval = 1,
+ .maxval = 100,
+ .help = "Send high priority I/O this percentage of the time",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+#else
+ {
+ .name = "cmdprio_percentage",
+ .lname = "High priority percentage",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support I/O priority classes",
+ },
+#endif
{
.name = "fixedbufs",
.lname = "Fixed (pre-mapped) IO buffers",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
+ {
+ .name = "nonvectored",
+ .lname = "Non-vectored",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, nonvectored),
+ .def = "-1",
+ .help = "Use non-vectored read/write commands",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "uncached",
+ .lname = "Uncached",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, uncached),
+ .help = "Use RWF_UNCACHED for buffered read/writes",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "nowait",
+ .lname = "RWF_NOWAIT",
+ .type = FIO_OPT_BOOL,
+ .off1 = offsetof(struct ioring_options, nowait),
+ .help = "Use RWF_NOWAIT for reads/writes",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
+ {
+ .name = "force_async",
+ .lname = "Force async",
+ .type = FIO_OPT_INT,
+ .off1 = offsetof(struct ioring_options, force_async),
+ .help = "Set IOSQE_ASYNC every N requests",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
{
.name = NULL,
},
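+/*
+ * fio carries its own io_uring syscall numbers and issues the raw
+ * syscalls directly; it does not link against liburing.
+ */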
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
unsigned int min_complete, unsigned int flags)
{
- return syscall(__NR_sys_io_uring_enter, ld->ring_fd, to_submit,
+ return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
min_complete, flags, NULL, 0);
}
struct io_uring_sqe *sqe;
sqe = &ld->sqes[io_u->index];
+
if (o->registerfiles) {
sqe->fd = f->engine_pos;
sqe->flags = IOSQE_FIXED_FILE;
} else {
sqe->fd = f->fd;
sqe->flags = 0;
}
- sqe->ioprio = 0;
- sqe->buf_index = 0;
if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
if (o->fixedbufs) {
- if (io_u->ddir == DDIR_READ)
- sqe->opcode = IORING_OP_READ_FIXED;
- else
- sqe->opcode = IORING_OP_WRITE_FIXED;
+ sqe->opcode = fixed_ddir_to_op[io_u->ddir];
sqe->addr = (unsigned long) io_u->xfer_buf;
sqe->len = io_u->xfer_buflen;
sqe->buf_index = io_u->index;
} else {
- if (io_u->ddir == DDIR_READ)
- sqe->opcode = IORING_OP_READV;
- else
- sqe->opcode = IORING_OP_WRITEV;
- sqe->addr = (unsigned long) &ld->iovecs[io_u->index];
- sqe->len = 1;
+ struct iovec *iov = &ld->iovecs[io_u->index];
+
+ /*
+ * Update based on the actual io_u; a requeue could have
+ * adjusted these fields.
+ */
+ iov->iov_base = io_u->xfer_buf;
+ iov->iov_len = io_u->xfer_buflen;
+
+ sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
+ if (o->nonvectored) {
+ sqe->addr = (unsigned long) iov->iov_base;
+ sqe->len = iov->iov_len;
+ } else {
+ sqe->addr = (unsigned long) iov;
+ sqe->len = 1;
+ }
}
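+ /*
+ * rw_flags and ioprio accumulate with |=, so start from a clean
+ * state before applying the per-job options below.
+ */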
+ sqe->rw_flags = 0;
+ if (!td->o.odirect && o->uncached)
+ sqe->rw_flags |= RWF_UNCACHED;
+ if (o->nowait)
+ sqe->rw_flags |= RWF_NOWAIT;
+ if (ld->ioprio_class_set)
+ sqe->ioprio = td->o.ioprio_class << 13;
+ if (ld->ioprio_set)
+ sqe->ioprio |= td->o.ioprio;
sqe->off = io_u->offset;
} else if (ddir_sync(io_u->ddir)) {
- sqe->fsync_flags = 0;
- if (io_u->ddir == DDIR_DATASYNC)
- sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
- sqe->opcode = IORING_OP_FSYNC;
+ sqe->ioprio = 0;
+ if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
+ sqe->off = f->first_write;
+ sqe->len = f->last_write - f->first_write;
+ sqe->sync_range_flags = td->o.sync_file_range;
+ sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
+ } else {
+ sqe->off = 0;
+ sqe->addr = 0;
+ sqe->len = 0;
+ sqe->fsync_flags = 0;
+ if (io_u->ddir == DDIR_DATASYNC)
+ sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
+ sqe->opcode = IORING_OP_FSYNC;
+ }
+ }
+
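+ /*
+ * IOSQE_ASYNC punts the request straight to the async worker pool
+ * instead of attempting non-blocking issue first; do that for every
+ * force_async-th request when the option is set.
+ */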
+ if (o->force_async && ++ld->prepped == o->force_async) {
+ ld->prepped = 0;
+ sqe->flags |= IOSQE_ASYNC;
}
sqe->user_data = (unsigned long) io_u;
head = *ring->head;
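+ /*
+ * Pair with the kernel's release store to the CQ tail: the acquire
+ * load guarantees the cqes up to tail are visible before use.
+ */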
do {
- read_barrier();
- if (head == *ring->tail)
+ if (head == atomic_load_acquire(ring->tail))
break;
reaped++;
head++;
} while (reaped + events < max);
- *ring->head = head;
- write_barrier();
+ if (reaped)
+ atomic_store_release(ring->head, head);
+
return reaped;
}
r = io_uring_enter(ld, 0, actual_min,
IORING_ENTER_GETEVENTS);
if (r < 0) {
- if (errno == EAGAIN)
+ if (errno == EAGAIN || errno == EINTR)
continue;
td_verror(td, errno, "io_uring_enter");
break;
return r < 0 ? r : events;
}
+static void fio_ioring_prio_prep(struct thread_data *td, struct io_u *io_u)
+{
+ struct ioring_options *o = td->eo;
+ struct ioring_data *ld = td->io_ops_data;
+
+ if (rand_between(&td->prio_state, 0, 99) < o->cmdprio_percentage) {
+ ld->sqes[io_u->index].ioprio = IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT;
+ io_u->flags |= IO_U_F_PRIORITY;
+ } else {
+ /* clear priority left over from a previous user of this sqe slot */
+ ld->sqes[io_u->index].ioprio = 0;
+ }
+}
+
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
struct io_u *io_u)
{
struct ioring_data *ld = td->io_ops_data;
struct io_sq_ring *ring = &ld->sq_ring;
+ struct ioring_options *o = td->eo;
unsigned tail, next_tail;
fio_ro_check(td, io_u);
tail = *ring->tail;
next_tail = tail + 1;
- read_barrier();
- if (next_tail == *ring->head)
+ if (next_tail == atomic_load_acquire(ring->head))
return FIO_Q_BUSY;
- /* ensure sqe stores are ordered with tail update */
- write_barrier();
+ if (o->cmdprio_percentage)
+ fio_ioring_prio_prep(td, io_u);
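+ /*
+ * The release store on the tail publishes the array update; the
+ * kernel's acquire load of the tail orders its read of the index.
+ */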
ring->array[tail & ld->sq_ring_mask] = io_u->index;
- *ring->tail = next_tail;
- write_barrier();
+ atomic_store_release(ring->tail, next_tail);
ld->queued++;
return FIO_Q_QUEUED;
*/
if (o->sqpoll_thread) {
struct io_sq_ring *ring = &ld->sq_ring;
+ unsigned flags;
- read_barrier();
- if (*ring->flags & IORING_SQ_NEED_WAKEUP)
+ flags = atomic_load_acquire(ring->flags);
+ if (flags & IORING_SQ_NEED_WAKEUP)
io_uring_enter(ld, ld->queued, 0,
IORING_ENTER_SQ_WAKEUP);
ld->queued = 0;
io_u_mark_submit(td, ret);
continue;
} else {
- if (errno == EAGAIN) {
+ if (errno == EAGAIN || errno == EINTR) {
ret = fio_ioring_cqring_reap(td, 0, ld->queued);
if (ret)
continue;
{
int i;
- for (i = 0; i < ARRAY_SIZE(ld->mmap); i++)
+ for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
munmap(ld->mmap[i].ptr, ld->mmap[i].len);
close(ld->ring_fd);
}
return 0;
}
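+/*
+ * Probe the kernel's supported opcodes and flip on non-vectored
+ * read/write when available, unless the user forced a setting.
+ */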
+static void fio_ioring_probe(struct thread_data *td)
+{
+ struct ioring_data *ld = td->io_ops_data;
+ struct ioring_options *o = td->eo;
+ struct io_uring_probe *p;
+ int ret;
+
+ /* already set by user, don't touch */
+ if (o->nonvectored != -1)
+ return;
+
+ /* default to off, as that's always safe */
+ o->nonvectored = 0;
+
+ p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+ if (!p)
+ return;
+
+ ret = syscall(__NR_io_uring_register, ld->ring_fd,
+ IORING_REGISTER_PROBE, p, 256);
+ if (ret < 0)
+ goto out;
+
+ if (IORING_OP_WRITE >= p->ops_len)
+ goto out;
+
+ if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
+ (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
+ o->nonvectored = 1;
+out:
+ free(p);
+}
+
static int fio_ioring_queue_init(struct thread_data *td)
{
struct ioring_data *ld = td->io_ops_data;
}
}
- ret = syscall(__NR_sys_io_uring_setup, depth, &p);
+ ret = syscall(__NR_io_uring_setup, depth, &p);
if (ret < 0)
return ret;
ld->ring_fd = ret;
- if (o->fixedbufs) {
- struct rlimit rlim = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
- return -1;
+ fio_ioring_probe(td);
- ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
+ if (o->fixedbufs) {
+ ret = syscall(__NR_io_uring_register, ld->ring_fd,
IORING_REGISTER_BUFFERS, ld->iovecs, depth);
if (ret < 0)
return ret;
f->engine_pos = i;
}
- ret = syscall(__NR_sys_io_uring_register, ld->ring_fd,
+ ret = syscall(__NR_io_uring_register, ld->ring_fd,
IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
if (ret) {
err:
err = fio_ioring_queue_init(td);
if (err) {
- td_verror(td, errno, "io_queue_init");
+ int init_err = errno;
+
+ if (init_err == ENOSYS)
+ log_err("fio: your kernel doesn't support io_uring\n");
+ td_verror(td, init_err, "io_queue_init");
return 1;
}
- printf("files=%d\n", o->registerfiles);
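+ /*
+ * Per-request prep no longer clears unused sqe fields, so zero
+ * every sqe once up front.
+ */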
+ for (i = 0; i < td->o.iodepth; i++) {
+ struct io_uring_sqe *sqe;
+
+ sqe = &ld->sqes[i];
+ memset(sqe, 0, sizeof(*sqe));
+ }
+
if (o->registerfiles) {
err = fio_ioring_register_files(td);
if (err) {
return 0;
}
-static unsigned roundup_pow2(unsigned depth)
-{
- return 1UL << __fls(depth - 1);
-}
-
static int fio_ioring_init(struct thread_data *td)
{
struct ioring_options *o = td->eo;
struct ioring_data *ld;
+ struct thread_options *to = &td->o;
+
+ if (to->io_submit_mode == IO_MODE_OFFLOAD) {
+ log_err("fio: io_submit_mode=offload is not compatible (or "
+ "useful) with io_uring\n");
+ return 1;
+ }
/* sqthread submission requires registered files */
if (o->sqpoll_thread)
ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
td->io_ops_data = ld;
+
+ /*
+ * Check for option conflicts
+ */
+ if ((fio_option_is_set(to, ioprio) || fio_option_is_set(to, ioprio_class)) &&
+ o->cmdprio_percentage != 0) {
+ log_err("%s: cmdprio_percentage is mutually exclusive with the "
+ "prio and prioclass options\n", to->name);
+ td_verror(td, EINVAL, "fio_io_uring_init");
+ return 1;
+ }
+ }
+
+ if (fio_option_is_set(to, ioprio_class))
+ ld->ioprio_class_set = true;
+ if (fio_option_is_set(to, ioprio))
+ ld->ioprio_set = true;
+
return 0;
}
static struct ioengine_ops ioengine = {
.name = "io_uring",
.version = FIO_IOOPS_VERSION,
- .flags = FIO_ASYNCIO_SYNC_TRIM,
+ .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD,
.init = fio_ioring_init,
.post_init = fio_ioring_post_init,
.io_u_init = fio_ioring_io_u_init,