#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
+#include "zbd.h"
#include "nvme.h"
#include <sys/stat.h>
{
.name = "sqthread_poll",
.lname = "Kernel SQ thread polling",
- .type = FIO_OPT_INT,
+ .type = FIO_OPT_STR_SET,
.off1 = offsetof(struct ioring_options, sqpoll_thread),
.help = "Offload submission/completion to kernel thread",
.category = FIO_OPT_C_ENGINE,
if (o->cmd_type != FIO_URING_CMD_NVME)
return -EINVAL;
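+ /*
+  * Trims are completed inline at queue time (see fio_ioring_cmd_io_u_trim()
+  * below), not through an SQE, so there is nothing to prepare here.
+  */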
+ if (io_u->ddir == DDIR_TRIM)
+ return 0;
+
sqe = &ld->sqes[(io_u->index) << 1];
if (o->registerfiles) {
ld->prepped = 0;
sqe->flags |= IOSQE_ASYNC;
}
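+ /*
+  * With fixed buffers registered, tell the kernel to use the
+  * pre-registered buffer at io_u->index for this passthrough command.
+  */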
+ if (o->fixedbufs) {
+ sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
+ sqe->buf_index = io_u->index;
+ }
cmd = (struct nvme_uring_cmd *)sqe->cmd;
return fio_nvme_uring_cmd_prep(cmd, io_u,
struct io_uring_cqe *cqe;
struct io_u *io_u;
unsigned index;
- static int eio;
index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
cqe = &ld->cq_ring.cqes[index];
io_u = (struct io_u *) (uintptr_t) cqe->user_data;
- if (eio++ == 5) {
- printf("mark EIO\n");
- cqe->res = -EIO;
- }
-
if (cqe->res != io_u->xfer_buflen) {
if (cqe->res > io_u->xfer_buflen)
io_u->error = -cqe->res;
r = fio_ioring_cqring_reap(td, events, max);
if (r) {
events += r;
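+ /* shrink the remaining budget so repeated reaps never exceed 'max' */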
+ max -= r;
if (actual_min != 0)
actual_min -= r;
continue;
ld->sqes[io_u->index].ioprio = io_u->ioprio;
}
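+/*
+ * Complete a trim for the passthrough engine. For zoned devices the ZBD
+ * code gets first crack at the range (and may complete it entirely);
+ * otherwise the range is handed to fio_nvme_trim().
+ */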
+static int fio_ioring_cmd_io_u_trim(const struct thread_data *td,
+ struct io_u *io_u)
+{
+ struct fio_file *f = io_u->file;
+ int ret;
+
+ if (td->o.zone_mode == ZONE_MODE_ZBD) {
+ ret = zbd_do_io_u_trim(td, io_u);
+ if (ret == io_u_completed)
+ return io_u->xfer_buflen;
+ if (ret)
+ goto err;
+ }
+
+ return fio_nvme_trim(td, f, io_u->offset, io_u->xfer_buflen);
+
+err:
+ io_u->error = ret;
+ return 0;
+}
+
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
struct io_u *io_u)
{
if (ld->queued)
return FIO_Q_BUSY;
- do_io_u_trim(td, io_u);
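+ /*
+  * Trims are completed synchronously here rather than queued to the
+  * ring; the passthrough engine needs its own helper for that.
+  */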
+ if (!strcmp(td->io_ops->name, "io_uring_cmd"))
+ fio_ioring_cmd_io_u_trim(td, io_u);
+ else
+ do_io_u_trim(td, io_u);
+
io_u_mark_submit(td, 1);
io_u_mark_complete(td, 1);
return FIO_Q_COMPLETED;
*/
if (o->sqpoll_thread) {
struct io_sq_ring *ring = &ld->sq_ring;
+ unsigned start = *ld->sq_ring.head;
unsigned flags;
flags = atomic_load_acquire(ring->flags);
if (flags & IORING_SQ_NEED_WAKEUP)
io_uring_enter(ld, ld->queued, 0,
IORING_ENTER_SQ_WAKEUP);
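+ /*
+  * The kernel SQ thread submits on our behalf, so account the
+  * ld->queued entries starting at 'start' as submitted here for
+  * the stats.
+  */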
+ fio_ioring_queued(td, start, ld->queued);
+ io_u_mark_submit(td, ld->queued);
+
ld->queued = 0;
return 0;
}
/* default to off, as that's always safe */
o->nonvectored = 0;
- p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
+ p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
if (!p)
return;
- memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
ret = syscall(__NR_io_uring_register, ld->ring_fd,
IORING_REGISTER_PROBE, p, 256);
if (ret < 0)
p.flags |= IORING_SETUP_SQ_AFF;
p.sq_thread_cpu = o->sqpoll_cpu;
}
+
+ /*
+ * Submission latency for sqpoll_thread is just the time it
+ * takes to fill in the SQ ring entries, plus any syscall when
+ * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time
+ * separately.
+ */
+ td->o.disable_slat = 1;
}
/*
p.flags |= IORING_SETUP_CQSIZE;
p.cq_entries = depth;
+ /*
+ * Set up COOP_TASKRUN since we don't need to be interrupted by an
+ * IPI to complete IO operations.
+ */
+ p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+ /*
+ * io_uring is always a single issuer, and we can defer task_work
+ * runs until we reap events.
+ */
+ p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
retry:
ret = syscall(__NR_io_uring_setup, depth, &p);
if (ret < 0) {
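+ /*
+  * Older kernels reject setup flags they don't know about with
+  * EINVAL; drop the optional flags one group at a time and retry.
+  */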
+ if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+ p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+ p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+ goto retry;
+ }
+ if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+ p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+ goto retry;
+ }
if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
p.flags &= ~IORING_SETUP_CQSIZE;
goto retry;
p.flags |= IORING_SETUP_SQ_AFF;
p.sq_thread_cpu = o->sqpoll_cpu;
}
+
+ /*
+ * Submission latency for sqpoll_thread is just the time it
+ * takes to fill in the SQ ring entries, plus any syscall when
+ * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time
+ * separately.
+ */
+ td->o.disable_slat = 1;
}
if (o->cmd_type == FIO_URING_CMD_NVME) {
p.flags |= IORING_SETUP_SQE128;
p.flags |= IORING_SETUP_CQSIZE;
p.cq_entries = depth;
+ /*
+ * Set up COOP_TASKRUN since we don't need to be interrupted by an
+ * IPI to complete IO operations.
+ */
+ p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+ /*
+ * io_uring is always a single issuer, and we can defer task_work
+ * runs until we reap events.
+ */
+ p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
retry:
ret = syscall(__NR_io_uring_setup, depth, &p);
if (ret < 0) {
+ if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+ p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+ p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+ goto retry;
+ }
+ if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+ p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+ goto retry;
+ }
if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
p.flags &= ~IORING_SETUP_CQSIZE;
goto retry;
if (o->cmd_type == FIO_URING_CMD_NVME) {
struct nvme_data *data = NULL;
unsigned int nsid, lba_size = 0;
- unsigned long long nlba = 0;
+ __u32 ms = 0;
+ __u64 nlba = 0;
int ret;
/* Store the namespace-id and lba size. */
data = FILE_ENG_DATA(f);
if (data == NULL) {
- ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
+ ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
if (ret)
return ret;
data = calloc(1, sizeof(struct nvme_data));
data->nsid = nsid;
- data->lba_shift = ilog2(lba_size);
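+ /*
+  * If the namespace reports a metadata size, size IO in units of the
+  * extended block (LBA data + metadata bytes).
+  */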
+ if (ms)
+ data->lba_ext = lba_size + ms;
+ else
+ data->lba_shift = ilog2(lba_size);
FILE_SET_ENG_DATA(f, data);
}
+
+ lba_size = data->lba_ext ? data->lba_ext : (1 << data->lba_shift);
+
+ for_each_rw_ddir(ddir) {
+ if (td->o.min_bs[ddir] % lba_size ||
+ td->o.max_bs[ddir] % lba_size) {
+ if (data->lba_ext)
+ log_err("block size must be a multiple of "
+ "(LBA data size + metadata size)\n");
+ else
+ log_err("block size must be a multiple of LBA data size\n");
+ return 1;
+ }
+ }
}
if (!ld || !o->registerfiles)
return generic_open_file(td, f);
if (o->cmd_type == FIO_URING_CMD_NVME) {
struct nvme_data *data = NULL;
unsigned int nsid, lba_size = 0;
- unsigned long long nlba = 0;
+ __u32 ms = 0;
+ __u64 nlba = 0;
int ret;
- ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
+ ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
if (ret)
return ret;
data = calloc(1, sizeof(struct nvme_data));
data->nsid = nsid;
- data->lba_shift = ilog2(lba_size);
+ if (ms)
+ data->lba_ext = lba_size + ms;
+ else
+ data->lba_shift = ilog2(lba_size);
f->real_file_size = lba_size * nlba;
fio_file_set_size_known(f);
return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
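+/*
+ * Fetch the FDP reclaim unit handle status for this namespace and report
+ * each handle's placement identifier back to fio.
+ */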
+static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
+ struct fio_ruhs_info *fruhs_info)
+{
+ struct nvme_fdp_ruh_status *ruhs;
+ int bytes, ret, i;
+
+ bytes = sizeof(*ruhs) + 128 * sizeof(struct nvme_fdp_ruh_status_desc);
+ ruhs = scalloc(1, bytes);
+ if (!ruhs)
+ return -ENOMEM;
+
+ ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
+ if (ret)
+ goto free;
+
+ fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
+ for (i = 0; i < fruhs_info->nr_ruhs; i++)
+ fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
+free:
+ sfree(ruhs);
+ return ret;
+}
+
static struct ioengine_ops ioengine_uring = {
.name = "io_uring",
.version = FIO_IOOPS_VERSION,
.get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
.options = options,
.option_struct_size = sizeof(struct ioring_options),
+ .fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
};
static void fio_init fio_ioring_register(void)