diff --git a/engines/io_uring.c b/engines/io_uring.c
index 94376efa7f79ad91894830250e059312f28ed798..c679177fb4765cd4cdf4777588af77f7d0794e5c 100644
--- a/engines/io_uring.c
+++ b/engines/io_uring.c
@@ -433,6 +433,10 @@ static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
                ld->prepped = 0;
                sqe->flags |= IOSQE_ASYNC;
        }
+       if (o->fixedbufs) {
+               sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
+               sqe->buf_index = io_u->index;
+       }
 
        cmd = (struct nvme_uring_cmd *)sqe->cmd;
        return fio_nvme_uring_cmd_prep(cmd, io_u,
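For context on the fixedbufs hunk above: sqe->buf_index must name a buffer that was registered with the ring beforehand, and this engine registers one iovec per io_u when fixedbufs is enabled, which is why io_u->index can be reused directly as the buffer index. The sketch below is illustrative only and not part of this patch; register_fixed_buffers() and sqe_use_fixed_buffer() are made-up helper names, and it assumes kernel headers recent enough to provide IORING_URING_CMD_FIXED and the uring_cmd_flags SQE field.

/*
 * Illustrative sketch, not fio code: register a set of buffers with the
 * ring, then reference one of them by index from a uring_cmd SQE.
 */
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Hypothetical helper: expose 'nr' iovecs to the kernel as fixed buffers. */
int register_fixed_buffers(int ring_fd, struct iovec *iovs, unsigned nr)
{
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_BUFFERS, iovs, nr);
}

/* Hypothetical helper: mark a uring_cmd SQE as using registered buffer 'idx'. */
void sqe_use_fixed_buffer(struct io_uring_sqe *sqe, unsigned idx)
{
	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
	sqe->buf_index = idx;	/* index into the iovec array registered above */
}

In the hunk above, io_u->index plays the role of idx.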
@@ -809,9 +813,30 @@ static int fio_ioring_queue_init(struct thread_data *td)
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;
 
+       /*
+        * Set up COOP_TASKRUN, as we don't need to be interrupted by an
+        * IPI to complete IO operations.
+        */
+       p.flags |= IORING_SETUP_COOP_TASKRUN;
+
+       /*
+        * io_uring is always a single issuer, and we can defer task_work
+        * runs until we reap events.
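+        * With DEFER_TASKRUN, that task_work is only run once this task
+        * calls io_uring_enter(2) with IORING_ENTER_GETEVENTS set.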
+        */
+       p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
+
 retry:
        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (ret < 0) {
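+               /*
+                * Older kernels reject setup flags they don't know about
+                * with EINVAL; peel the newer flags back off and retry.
+                * DEFER_TASKRUN is only accepted together with
+                * SINGLE_ISSUER, so those two are cleared as a pair.
+                */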
+               if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
+                       p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
+                       p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
+                       goto retry;
+               }
+               if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+                       p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+                       goto retry;
+               }
                if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                        p.flags &= ~IORING_SETUP_CQSIZE;
                        goto retry;