int queued;
int cq_ring_off;
unsigned iodepth;
- bool ioprio_class_set;
- bool ioprio_set;
int prepped;
struct ioring_mmap mmap[3];
- bool use_cmdprio;
+ struct cmdprio cmdprio;
};
struct ioring_options {
- void *pad;
+ struct thread_data *td;
unsigned int hipri;
- struct cmdprio cmdprio;
+ struct cmdprio_options cmdprio_options;
unsigned int fixedbufs;
unsigned int registerfiles;
unsigned int sqpoll_thread;
.lname = "high priority percentage",
.type = FIO_OPT_INT,
.off1 = offsetof(struct ioring_options,
- cmdprio.percentage[DDIR_READ]),
+ cmdprio_options.percentage[DDIR_READ]),
.off2 = offsetof(struct ioring_options,
- cmdprio.percentage[DDIR_WRITE]),
+ cmdprio_options.percentage[DDIR_WRITE]),
.minval = 0,
.maxval = 100,
.help = "Send high priority I/O this percentage of the time",
.lname = "Asynchronous I/O priority class",
.type = FIO_OPT_INT,
.off1 = offsetof(struct ioring_options,
- cmdprio.class[DDIR_READ]),
+ cmdprio_options.class[DDIR_READ]),
.off2 = offsetof(struct ioring_options,
- cmdprio.class[DDIR_WRITE]),
+ cmdprio_options.class[DDIR_WRITE]),
.help = "Set asynchronous IO priority class",
.minval = IOPRIO_MIN_PRIO_CLASS + 1,
.maxval = IOPRIO_MAX_PRIO_CLASS,
.lname = "Asynchronous I/O priority level",
.type = FIO_OPT_INT,
.off1 = offsetof(struct ioring_options,
- cmdprio.level[DDIR_READ]),
+ cmdprio_options.level[DDIR_READ]),
.off2 = offsetof(struct ioring_options,
- cmdprio.level[DDIR_WRITE]),
+ cmdprio_options.level[DDIR_WRITE]),
.help = "Set asynchronous IO priority level",
.minval = IOPRIO_MIN_PRIO,
.maxval = IOPRIO_MAX_PRIO,
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
+ {
+ .name = "cmdprio_bssplit",
+ .lname = "Priority percentage block size split",
+ .type = FIO_OPT_STR_STORE,
+ .off1 = offsetof(struct ioring_options,
+ cmdprio_options.bssplit_str),
+ .help = "Set priority percentages for different block sizes",
+ .category = FIO_OPT_C_ENGINE,
+ .group = FIO_OPT_G_IOURING,
+ },
#else
{
.name = "cmdprio_percentage",
.type = FIO_OPT_UNSUPPORTED,
.help = "Your platform does not support I/O priority classes",
},
+ {
+ .name = "cmdprio_bssplit",
+ .lname = "Priority percentage block size split",
+ .type = FIO_OPT_UNSUPPORTED,
+ .help = "Your platform does not support I/O priority classes",
+ },
#endif
{
.name = "fixedbufs",
sqe->rw_flags |= RWF_UNCACHED;
if (o->nowait)
sqe->rw_flags |= RWF_NOWAIT;
- if (ld->ioprio_class_set)
- sqe->ioprio = td->o.ioprio_class << 13;
- if (ld->ioprio_set)
- sqe->ioprio |= td->o.ioprio;
+
+ /*
+ * Since io_uring can have a submission context (sqthread_poll)
+ * that is different from the process context, we cannot rely on
+ * the IO priority set by ioprio_set() (option prio/prioclass)
+ * to be inherited.
+ * td->ioprio will have the value of the "default prio", so set
+ * this unconditionally. This value might get overridden by
+ * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
+ * cmdprio_bssplit is used.
+ */
+ sqe->ioprio = td->ioprio;
sqe->off = io_u->offset;
} else if (ddir_sync(io_u->ddir)) {
sqe->ioprio = 0;
return r < 0 ? r : events;
}
-static void fio_ioring_prio_prep(struct thread_data *td, struct io_u *io_u)
+static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
+ struct io_u *io_u)
{
- struct ioring_options *o = td->eo;
struct ioring_data *ld = td->io_ops_data;
- struct io_uring_sqe *sqe = &ld->sqes[io_u->index];
- struct cmdprio *cmdprio = &o->cmdprio;
- enum fio_ddir ddir = io_u->ddir;
- unsigned int p = cmdprio->percentage[ddir];
-
- if (p && rand_between(&td->prio_state, 0, 99) < p) {
- sqe->ioprio =
- ioprio_value(cmdprio->class[ddir], cmdprio->level[ddir]);
- io_u->flags |= IO_U_F_PRIORITY;
- } else {
- sqe->ioprio = 0;
- }
+ struct cmdprio *cmdprio = &ld->cmdprio;
+
+ if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
+ ld->sqes[io_u->index].ioprio = io_u->ioprio;
}
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
if (next_tail == atomic_load_acquire(ring->head))
return FIO_Q_BUSY;
- if (ld->use_cmdprio)
- fio_ioring_prio_prep(td, io_u);
+ if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
+ fio_ioring_cmdprio_prep(td, io_u);
+
ring->array[tail & ld->sq_ring_mask] = io_u->index;
atomic_store_release(ring->tail, next_tail);
if (!(td->flags & TD_F_CHILD))
fio_ioring_unmap(ld);
+ fio_cmdprio_cleanup(&ld->cmdprio);
free(ld->io_u_index);
free(ld->iovecs);
free(ld->fds);
}
}
+ /*
+ /*
+ * Clamp CQ ring size at our SQ ring size; we don't need more entries
+ * than that.
+ */
+ p.flags |= IORING_SETUP_CQSIZE;
+ p.cq_entries = depth;
+
+retry:
ret = syscall(__NR_io_uring_setup, depth, &p);
- if (ret < 0)
+ if (ret < 0) {
+ if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
+ p.flags &= ~IORING_SETUP_CQSIZE;
+ goto retry;
+ }
return ret;
+ }
ld->ring_fd = ret;
{
struct ioring_options *o = td->eo;
struct ioring_data *ld;
- struct cmdprio *cmdprio = &o->cmdprio;
int ret;
/* sqthread submission requires registered files */
td->io_ops_data = ld;
- ret = fio_cmdprio_init(td, cmdprio, &ld->use_cmdprio);
+ ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
if (ret) {
td_verror(td, EINVAL, "fio_ioring_init");
return 1;
}
- if (fio_option_is_set(&td->o, ioprio_class))
- ld->ioprio_class_set = true;
- if (fio_option_is_set(&td->o, ioprio))
- ld->ioprio_set = true;
-
return 0;
}