diff --git a/engines/io_uring.c b/engines/io_uring.c
index cffc73710ddd6654399ab051dd929aec02b1e646..f10a45933ff5bb1877b072538547ee2e637a69d5 100644
--- a/engines/io_uring.c
+++ b/engines/io_uring.c
@@ -24,6 +24,7 @@
 #include "../lib/types.h"
 #include "../os/linux/io_uring.h"
 #include "cmdprio.h"
+#include "zbd.h"
 #include "nvme.h"
 
 #include <sys/stat.h>
@@ -226,7 +227,7 @@ static struct fio_option options[] = {
        {
                .name   = "sqthread_poll",
                .lname  = "Kernel SQ thread polling",
-               .type   = FIO_OPT_INT,
+               .type   = FIO_OPT_STR_SET,
                .off1   = offsetof(struct ioring_options, sqpoll_thread),
                .help   = "Offload submission/completion to kernel thread",
                .category = FIO_OPT_C_ENGINE,
@@ -409,6 +410,9 @@ static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
        if (o->cmd_type != FIO_URING_CMD_NVME)
                return -EINVAL;
 
+       if (io_u->ddir == DDIR_TRIM)
+               return 0;
+
        sqe = &ld->sqes[(io_u->index) << 1];
 
        if (o->registerfiles) {
@@ -433,6 +437,10 @@ static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }
+       if (o->fixedbufs) {
+               sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
+               sqe->buf_index = io_u->index;
+       }
 
        cmd = (struct nvme_uring_cmd *)sqe->cmd;
        return fio_nvme_uring_cmd_prep(cmd, io_u,
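Note: the fixedbufs hunk above relies on buffers that were pre-registered with
io_uring_register(IORING_REGISTER_BUFFERS). With IORING_URING_CMD_FIXED (kernel
6.1+) the kernel resolves the passthrough command's data buffer from that table
instead of pinning user pages on every submission. A minimal sketch of the idea
(the helper name is illustrative, not fio code):

    #include <linux/io_uring.h>

    /* Point an IORING_OP_URING_CMD SQE at slot buf_index of the
     * registered buffer table; the kernel then skips per-I/O page
     * pinning for the command's data transfer. */
    static void uring_cmd_use_fixed_buf(struct io_uring_sqe *sqe,
                                        unsigned short buf_index)
    {
            sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
            sqe->buf_index = buf_index;
    }
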
@@ -532,6 +540,7 @@ static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
                        if (r < 0) {
                                if (errno == EAGAIN || errno == EINTR)
                                        continue;
+                               r = -errno;
                                td_verror(td, errno, "io_uring_enter");
                                break;
                        }
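Note: the added r = -errno captures the failure reason before td_verror() or any
later libc call can clobber errno, so the function returns a negative errno
rather than the syscall's raw -1. The convention in isolation (the wrapper name
is illustrative):

    #include <errno.h>
    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Reap completions; returns the count, or -errno on hard failure,
     * so callers can tell EAGAIN/EINTR retries from real errors. */
    static int ring_enter_getevents(int ring_fd, unsigned int min_complete)
    {
            int r = syscall(__NR_io_uring_enter, ring_fd, 0, min_complete,
                            IORING_ENTER_GETEVENTS, NULL, 0);

            if (r < 0)
                    return -errno;
            return r;
    }
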
@@ -551,6 +560,27 @@ static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
                ld->sqes[io_u->index].ioprio = io_u->ioprio;
 }
 
+static int fio_ioring_cmd_io_u_trim(const struct thread_data *td,
+                                   struct io_u *io_u)
+{
+       struct fio_file *f = io_u->file;
+       int ret;
+
+       if (td->o.zone_mode == ZONE_MODE_ZBD) {
+               ret = zbd_do_io_u_trim(td, io_u);
+               if (ret == io_u_completed)
+                       return io_u->xfer_buflen;
+               if (ret)
+                       goto err;
+       }
+
+       return fio_nvme_trim(td, f, io_u->offset, io_u->xfer_buflen);
+
+err:
+       io_u->error = ret;
+       return 0;
+}
+
 static enum fio_q_status fio_ioring_queue(struct thread_data *td,
                                          struct io_u *io_u)
 {
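Note: for the non-zoned path in fio_ioring_cmd_io_u_trim() above, fio_nvme_trim()
issues the trim as an NVMe Dataset Management command with the deallocate
attribute. A rough standalone sketch of that operation through the passthrough
ioctl, as background (field values per the NVMe base spec, not fio's exact code):

    #include <linux/nvme_ioctl.h>
    #include <linux/types.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    struct dsm_range {
            __u32 cattr;    /* context attributes (unused here) */
            __u32 nlb;      /* number of logical blocks */
            __u64 slba;     /* starting LBA */
    };

    /* Deallocate one LBA range on an NVMe char device (/dev/ngXnY). */
    static int nvme_trim_range(int fd, __u32 nsid, __u64 slba, __u32 nlb)
    {
            struct dsm_range range = { .nlb = nlb, .slba = slba };
            struct nvme_passthru_cmd cmd = {
                    .opcode   = 0x09,               /* Dataset Management */
                    .nsid     = nsid,
                    .addr     = (__u64)(uintptr_t)&range,
                    .data_len = sizeof(range),
                    .cdw10    = 0,                  /* number of ranges - 1 */
                    .cdw11    = 1 << 2,             /* AD: deallocate */
            };

            return ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
    }
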
@@ -567,7 +597,11 @@ static enum fio_q_status fio_ioring_queue(struct thread_data *td,
                if (ld->queued)
                        return FIO_Q_BUSY;
 
-               do_io_u_trim(td, io_u);
+               if (!strcmp(td->io_ops->name, "io_uring_cmd"))
+                       fio_ioring_cmd_io_u_trim(td, io_u);
+               else
+                       do_io_u_trim(td, io_u);
+
                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
                return FIO_Q_COMPLETED;
@@ -632,12 +666,16 @@ static int fio_ioring_commit(struct thread_data *td)
         */
        if (o->sqpoll_thread) {
                struct io_sq_ring *ring = &ld->sq_ring;
+               unsigned start = *ld->sq_ring.head;
                unsigned flags;
 
                flags = atomic_load_acquire(ring->flags);
                if (flags & IORING_SQ_NEED_WAKEUP)
                        io_uring_enter(ld, ld->queued, 0,
                                        IORING_ENTER_SQ_WAKEUP);
+               fio_ioring_queued(td, start, ld->queued);
+               io_u_mark_submit(td, ld->queued);
+
                ld->queued = 0;
                return 0;
        }
@@ -665,6 +703,7 @@ static int fio_ioring_commit(struct thread_data *td)
                                usleep(1);
                                continue;
                        }
+                       ret = -errno;
                        td_verror(td, errno, "io_uring_enter submit");
                        break;
                }
@@ -798,6 +837,14 @@ static int fio_ioring_queue_init(struct thread_data *td)
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }
+
+               /*
+                * Submission latency for sqpoll_thread is just the time it
+                * takes to fill in the SQ ring entries, plus any syscall made
+                * when IORING_SQ_NEED_WAKEUP is set; we don't need to log
+                * that time separately.
+                */
+               td->o.disable_slat = 1;
        }
 
        /*
@@ -807,9 +854,30 @@ static int fio_ioring_queue_init(struct thread_data *td)
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;
 
+       /*
+        * Set up COOP_TASKRUN, since we don't need to be interrupted by
+        * an IPI to process completed I/O operations.
+        */
+       p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+       /*
+        * fio is the only issuer on this ring, so mark it single-issuer
+        * and defer task_work runs until we reap events.
+        */
+       p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
 retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
+               if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+                       p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+                       p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+                       goto retry;
+               }
+               if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+                       p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+                       goto retry;
+               }
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;
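Note: both queue-init paths now follow the same probe-and-strip pattern: request
the newest setup flags, then peel them off when an older kernel rejects the set
with EINVAL (IORING_SETUP_COOP_TASKRUN is 5.19+; SINGLE_ISSUER is 6.0+;
DEFER_TASKRUN is 6.1+ and requires SINGLE_ISSUER). Condensed into a standalone
sketch covering just the new flags:

    #include <errno.h>
    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Returns a ring fd, or -errno once no optional flag is left to strip. */
    static int setup_ring_with_fallback(unsigned int entries,
                                        struct io_uring_params *p)
    {
            p->flags |= IORING_SETUP_COOP_TASKRUN |
                        IORING_SETUP_SINGLE_ISSUER |
                        IORING_SETUP_DEFER_TASKRUN;

            for (;;) {
                    int fd = syscall(__NR_io_uring_setup, entries, p);

                    if (fd >= 0 || errno != EINVAL)
                            return fd >= 0 ? fd : -errno;
                    if (p->flags & IORING_SETUP_DEFER_TASKRUN)
                            /* DEFER_TASKRUN implies SINGLE_ISSUER; drop both */
                            p->flags &= ~(IORING_SETUP_DEFER_TASKRUN |
                                          IORING_SETUP_SINGLE_ISSUER);
                    else if (p->flags & IORING_SETUP_COOP_TASKRUN)
                            p->flags &= ~IORING_SETUP_COOP_TASKRUN;
                    else
                            return -EINVAL;
            }
    }
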
@@ -849,6 +917,14 @@ static int fio_ioring_cmd_queue_init(struct thread_data *td)
                        p.flags |= IORING_SETUP_SQ_AFF;
                        p.sq_thread_cpu = o->sqpoll_cpu;
                }
+
+               /*
+                * Submission latency for sqpoll_thread is just the time it
+                * takes to fill in the SQ ring entries, plus any syscall made
+                * when IORING_SQ_NEED_WAKEUP is set; we don't need to log
+                * that time separately.
+                */
+               td->o.disable_slat = 1;
        }
        if (o->cmd_type == FIO_URING_CMD_NVME) {
                p.flags |= IORING_SETUP_SQE128;
@@ -862,9 +938,30 @@ static int fio_ioring_cmd_queue_init(struct thread_data *td)
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;
 
+       /*
+        * Set up COOP_TASKRUN, since we don't need to be interrupted by
+        * an IPI to process completed I/O operations.
+        */
+       p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+       /*
+        * fio is the only issuer on this ring, so mark it single-issuer
+        * and defer task_work runs until we reap events.
+        */
+       p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
 retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
+               if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+                       p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+                       p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+                       goto retry;
+               }
+               if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+                       p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+                       goto retry;
+               }
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;
@@ -1080,7 +1177,7 @@ static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                unsigned int nsid, lba_size = 0;
-               unsigned long long nlba = 0;
+               __u64 nlba = 0;
                int ret;
 
                /* Store the namespace-id and lba size. */
@@ -1146,7 +1243,7 @@ static int fio_ioring_cmd_get_file_size(struct thread_data *td,
        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                unsigned int nsid, lba_size = 0;
-               unsigned long long nlba = 0;
+               __u64 nlba = 0;
                int ret;
 
                ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
@@ -1194,6 +1291,29 @@ static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
        return fio_nvme_get_max_open_zones(td, f, max_open_zones);
 }
 
+static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
+                                    struct fio_ruhs_info *fruhs_info)
+{
+       struct nvme_fdp_ruh_status *ruhs;
+       int bytes, ret, i;
+
+       bytes = sizeof(*ruhs) + 128 * sizeof(struct nvme_fdp_ruh_status_desc);
+       ruhs = scalloc(1, bytes);
+       if (!ruhs)
+               return -ENOMEM;
+
+       ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
+       if (ret)
+               goto free;
+
+       fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
+       for (i = 0; i < fruhs_info->nr_ruhs; i++)
+               fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
+free:
+       sfree(ruhs);
+       return ret;
+}
+
 static struct ioengine_ops ioengine_uring = {
        .name                   = "io_uring",
        .version                = FIO_IOOPS_VERSION,
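Note: fio_ioring_cmd_fetch_ruhs() above sizes its buffer for up to 128 reclaim
unit handle descriptors and copies out one placement ID per descriptor. The
reply layout it walks looks roughly like this (per the NVMe FDP specification;
fio's actual definitions live in engines/nvme.h):

    #include <linux/types.h>

    struct nvme_fdp_ruh_status_desc {
            __u16 pid;              /* placement identifier */
            __u16 ruhid;            /* reclaim unit handle identifier */
            __u32 earutr;           /* estimated active RU time remaining */
            __u64 ruamw;            /* RU available media writes */
            __u8  rsvd16[16];
    };

    struct nvme_fdp_ruh_status {
            __u8   rsvd0[14];
            __le16 nruhsd;          /* number of descriptors that follow */
            struct nvme_fdp_ruh_status_desc ruhss[];
    };
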
@@ -1239,6 +1359,7 @@ static struct ioengine_ops ioengine_uring_cmd = {
        .get_max_open_zones     = fio_ioring_cmd_get_max_open_zones,
        .options                = options,
        .option_struct_size     = sizeof(struct ioring_options),
+       .fdp_fetch_ruhs         = fio_ioring_cmd_fetch_ruhs,
 };
 
 static void fio_init fio_ioring_register(void)