#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
+#include "zbd.h"
#include "nvme.h"
#include <sys/stat.h>
struct ioring_mmap mmap[3];
struct cmdprio cmdprio;
+
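+	/* one DSM range per io_u, filled in when prepping async trims */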
+ struct nvme_dsm_range *dsm;
};
struct ioring_options {
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
-#ifdef FIO_HAVE_IOPRIO_CLASS
- {
- .name = "cmdprio_percentage",
- .lname = "high priority percentage",
- .type = FIO_OPT_INT,
- .off1 = offsetof(struct ioring_options,
- cmdprio_options.percentage[DDIR_READ]),
- .off2 = offsetof(struct ioring_options,
- cmdprio_options.percentage[DDIR_WRITE]),
- .minval = 0,
- .maxval = 100,
- .help = "Send high priority I/O this percentage of the time",
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_IOURING,
- },
- {
- .name = "cmdprio_class",
- .lname = "Asynchronous I/O priority class",
- .type = FIO_OPT_INT,
- .off1 = offsetof(struct ioring_options,
- cmdprio_options.class[DDIR_READ]),
- .off2 = offsetof(struct ioring_options,
- cmdprio_options.class[DDIR_WRITE]),
- .help = "Set asynchronous IO priority class",
- .minval = IOPRIO_MIN_PRIO_CLASS + 1,
- .maxval = IOPRIO_MAX_PRIO_CLASS,
- .interval = 1,
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_IOURING,
- },
- {
- .name = "cmdprio",
- .lname = "Asynchronous I/O priority level",
- .type = FIO_OPT_INT,
- .off1 = offsetof(struct ioring_options,
- cmdprio_options.level[DDIR_READ]),
- .off2 = offsetof(struct ioring_options,
- cmdprio_options.level[DDIR_WRITE]),
- .help = "Set asynchronous IO priority level",
- .minval = IOPRIO_MIN_PRIO,
- .maxval = IOPRIO_MAX_PRIO,
- .interval = 1,
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_IOURING,
- },
- {
- .name = "cmdprio_bssplit",
- .lname = "Priority percentage block size split",
- .type = FIO_OPT_STR_STORE,
- .off1 = offsetof(struct ioring_options,
- cmdprio_options.bssplit_str),
- .help = "Set priority percentages for different block sizes",
- .category = FIO_OPT_C_ENGINE,
- .group = FIO_OPT_G_IOURING,
- },
-#else
- {
- .name = "cmdprio_percentage",
- .lname = "high priority percentage",
- .type = FIO_OPT_UNSUPPORTED,
- .help = "Your platform does not support I/O priority classes",
- },
- {
- .name = "cmdprio_class",
- .lname = "Asynchronous I/O priority class",
- .type = FIO_OPT_UNSUPPORTED,
- .help = "Your platform does not support I/O priority classes",
- },
- {
- .name = "cmdprio",
- .lname = "Asynchronous I/O priority level",
- .type = FIO_OPT_UNSUPPORTED,
- .help = "Your platform does not support I/O priority classes",
- },
- {
- .name = "cmdprio_bssplit",
- .lname = "Priority percentage block size split",
- .type = FIO_OPT_UNSUPPORTED,
- .help = "Your platform does not support I/O priority classes",
- },
-#endif
{
.name = "fixedbufs",
.lname = "Fixed (pre-mapped) IO buffers",
{
.name = "sqthread_poll",
.lname = "Kernel SQ thread polling",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_STR_SET,
.off1 = offsetof(struct ioring_options, sqpoll_thread),
.help = "Offload submission/completion to kernel thread",
.category = FIO_OPT_C_ENGINE,
.group = FIO_OPT_G_IOURING,
},
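+	/* the cmdprio_* options are now defined once in cmdprio.h and shared across engines */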
+ CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),
{
.name = NULL,
},
/*
* Since io_uring can have a submission context (sqthread_poll)
* that is different from the process context, we cannot rely on
- * the IO priority set by ioprio_set() (option prio/prioclass)
- * to be inherited.
+ * the IO priority set by ioprio_set() (options prio, prioclass,
+ * and priohint) to be inherited.
* td->ioprio will have the value of the "default prio", so set
* this unconditionally. This value might get overridden by
* fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
if (o->cmd_type != FIO_URING_CMD_NVME)
return -EINVAL;
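+	/* sync trims are completed inline in the queue path (do_io_u_trim()), so there is no SQE to prep */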
+	if (io_u->ddir == DDIR_TRIM && (td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM))
+ return 0;
+
sqe = &ld->sqes[(io_u->index) << 1];
if (o->registerfiles) {
ld->prepped = 0;
sqe->flags |= IOSQE_ASYNC;
}
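+	/* with fixedbufs, tell the command to use the buffer registered at this io_u's index */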
+ if (o->fixedbufs) {
+ sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
+ sqe->buf_index = io_u->index;
+ }
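+	/* build the NVMe passthrough command; the iovec is only used for vectored IO, the DSM range only for trims */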
cmd = (struct nvme_uring_cmd *)sqe->cmd;
return fio_nvme_uring_cmd_prep(cmd, io_u,
- o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
+ o->nonvectored ? NULL : &ld->iovecs[io_u->index],
+ &ld->dsm[io_u->index]);
}
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
r = fio_ioring_cqring_reap(td, events, max);
if (r) {
events += r;
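+			/* also shrink the remaining budget so repeated reaps don't exceed max */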
+ max -= r;
if (actual_min != 0)
actual_min -= r;
continue;
if (ld->queued == ld->iodepth)
return FIO_Q_BUSY;
- if (io_u->ddir == DDIR_TRIM) {
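+	/* trims stay inline (sync) only for engines flagged FIO_ASYNCIO_SYNC_TRIM; otherwise they are queued like any other IO */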
+	if (io_u->ddir == DDIR_TRIM && (td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)) {
if (ld->queued)
return FIO_Q_BUSY;
do_io_u_trim(td, io_u);
+
io_u_mark_submit(td, 1);
io_u_mark_complete(td, 1);
return FIO_Q_COMPLETED;
*/
if (o->sqpoll_thread) {
struct io_sq_ring *ring = &ld->sq_ring;
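+		/* index of the first SQ ring entry filled by this batch */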
+ unsigned start = *ld->sq_ring.tail - ld->queued;
unsigned flags;
flags = atomic_load_acquire(ring->flags);
if (flags & IORING_SQ_NEED_WAKEUP)
io_uring_enter(ld, ld->queued, 0,
IORING_ENTER_SQ_WAKEUP);
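+		/* the SQPOLL thread consumes these SQEs directly, so log issue times and mark submission here */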
+ fio_ioring_queued(td, start, ld->queued);
+ io_u_mark_submit(td, ld->queued);
+
ld->queued = 0;
return 0;
}
free(ld->io_u_index);
free(ld->iovecs);
free(ld->fds);
+ free(ld->dsm);
free(ld);
}
}
/* default to off, as that's always safe */
o->nonvectored = 0;
- p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+ p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
if (!p)
return;
- memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
ret = syscall(__NR_io_uring_register, ld->ring_fd,
IORING_REGISTER_PROBE, p, 256);
if (ret < 0)
p.flags |= IORING_SETUP_SQ_AFF;
p.sq_thread_cpu = o->sqpoll_cpu;
}
+
+ /*
+	 * Submission latency for sqpoll_thread is just the time it
+	 * takes to fill in the SQ ring entries, plus the io_uring_enter()
+	 * syscall when IORING_SQ_NEED_WAKEUP is set; we don't need to
+	 * log that time separately.
+ */
+ td->o.disable_slat = 1;
}
/*
p.flags |= IORING_SETUP_CQSIZE;
p.cq_entries = depth;
+ /*
+	 * Set up COOP_TASKRUN, as we don't need to be interrupted by an
+	 * IPI to complete IO operations.
+ */
+ p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+ /*
+ * io_uring is always a single issuer, and we can defer task_work
+ * runs until we reap events.
+ */
+ p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
retry:
ret = syscall(__NR_io_uring_setup, depth, &p);
if (ret < 0) {
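+		/* older kernels reject the newer setup flags with EINVAL; drop them and retry */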
+ if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+ p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+ p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+ goto retry;
+ }
+ if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+ p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+ goto retry;
+ }
if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
p.flags &= ~IORING_SETUP_CQSIZE;
goto retry;
p.flags |= IORING_SETUP_SQ_AFF;
p.sq_thread_cpu = o->sqpoll_cpu;
}
+
+ /*
+	 * Submission latency for sqpoll_thread is just the time it
+	 * takes to fill in the SQ ring entries, plus the io_uring_enter()
+	 * syscall when IORING_SQ_NEED_WAKEUP is set; we don't need to
+	 * log that time separately.
+ */
+ td->o.disable_slat = 1;
}
if (o->cmd_type == FIO_URING_CMD_NVME) {
p.flags |= IORING_SETUP_SQE128;
p.flags |= IORING_SETUP_CQSIZE;
p.cq_entries = depth;
+ /*
+	 * Set up COOP_TASKRUN, as we don't need to be interrupted by an
+	 * IPI to complete IO operations.
+ */
+ p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+ /*
+ * io_uring is always a single issuer, and we can defer task_work
+ * runs until we reap events.
+ */
+ p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
retry:
ret = syscall(__NR_io_uring_setup, depth, &p);
if (ret < 0) {
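+		/* older kernels reject the newer setup flags with EINVAL; drop them and retry */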
+ if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+ p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+ p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+ goto retry;
+ }
+ if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+ p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+ goto retry;
+ }
if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
p.flags &= ~IORING_SETUP_CQSIZE;
goto retry;
return 1;
}
+ /*
+	 * For io_uring_cmd, trims are async operations unless we are operating
+	 * in zbd mode, where trim means zone reset. Async trims each need a
+	 * DSM range, so allocate one per io_u in the queue.
+ */
+ if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
+ td->o.zone_mode == ZONE_MODE_ZBD)
+ td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;
+ else
+ ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
+
return 0;
}
if (o->cmd_type == FIO_URING_CMD_NVME) {
struct nvme_data *data = NULL;
unsigned int nsid, lba_size = 0;
- unsigned long long nlba = 0;
+ __u32 ms = 0;
+ __u64 nlba = 0;
int ret;
/* Store the namespace-id and lba size. */
data = FILE_ENG_DATA(f);
if (data == NULL) {
- ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
+ ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
if (ret)
return ret;
data = calloc(1, sizeof(struct nvme_data));
data->nsid = nsid;
- data->lba_shift = ilog2(lba_size);
+ if (ms)
+ data->lba_ext = lba_size + ms;
+ else
+ data->lba_shift = ilog2(lba_size);
FILE_SET_ENG_DATA(f, data);
}
+
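+		/* extended LBAs interleave metadata with data, so the IO unit is lba_size + ms bytes */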
+ assert(data->lba_shift < 32);
+ lba_size = data->lba_ext ? data->lba_ext : (1U << data->lba_shift);
+
+ for_each_rw_ddir(ddir) {
+ if (td->o.min_bs[ddir] % lba_size ||
+ td->o.max_bs[ddir] % lba_size) {
+ if (data->lba_ext)
+ log_err("block size must be a multiple of "
+ "(LBA data size + Metadata size)\n");
+ else
+ log_err("block size must be a multiple of LBA data size\n");
+ return 1;
+ }
+ }
}
if (!ld || !o->registerfiles)
return generic_open_file(td, f);
if (o->cmd_type == FIO_URING_CMD_NVME) {
struct nvme_data *data = NULL;
unsigned int nsid, lba_size = 0;
- unsigned long long nlba = 0;
+ __u32 ms = 0;
+ __u64 nlba = 0;
int ret;
- ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
+ ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
if (ret)
return ret;
data = calloc(1, sizeof(struct nvme_data));
data->nsid = nsid;
- data->lba_shift = ilog2(lba_size);
+ if (ms)
+ data->lba_ext = lba_size + ms;
+ else
+ data->lba_shift = ilog2(lba_size);
f->real_file_size = lba_size * nlba;
fio_file_set_size_known(f);
return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
+static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
+ struct fio_ruhs_info *fruhs_info)
+{
+ struct nvme_fdp_ruh_status *ruhs;
+ int bytes, ret, i;
+
+ bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
+ ruhs = scalloc(1, bytes);
+ if (!ruhs)
+ return -ENOMEM;
+
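+	/* read the reclaim unit handle status list from the device */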
+ ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
+ if (ret)
+ goto free;
+
+ fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
+ for (i = 0; i < fruhs_info->nr_ruhs; i++)
+ fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
+free:
+ sfree(ruhs);
+ return ret;
+}
+
static struct ioengine_ops ioengine_uring = {
.name = "io_uring",
.version = FIO_IOOPS_VERSION,
static struct ioengine_ops ioengine_uring_cmd = {
.name = "io_uring_cmd",
.version = FIO_IOOPS_VERSION,
- .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
- FIO_MEMALIGN | FIO_RAWIO |
+ .flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
FIO_ASYNCIO_SETS_ISSUE_TIME,
.init = fio_ioring_init,
.post_init = fio_ioring_cmd_post_init,
.get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
.options = options,
.option_struct_size = sizeof(struct ioring_options),
+ .fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
};
static void fio_init fio_ioring_register(void)