struct ioring_data *ld = td->io_ops_data;
struct ioring_options *o = td->eo;
struct fio_file *f = io_u->file;
+ struct nvme_uring_cmd *cmd;
struct io_uring_sqe *sqe;
- int ret;
- /* nvme_uring_cmd case */
- if (o->cmd_type == FIO_URING_CMD_NVME) {
- struct nvme_uring_cmd *cmd;
+ /* only supports nvme_uring_cmd */
+ if (o->cmd_type != FIO_URING_CMD_NVME)
+ return -EINVAL;
- sqe = &ld->sqes[(io_u->index) << 1];
+ sqe = &ld->sqes[(io_u->index) << 1];
- if (o->registerfiles) {
- sqe->fd = f->engine_pos;
- sqe->flags = IOSQE_FIXED_FILE;
- } else {
- sqe->fd = f->fd;
- }
- sqe->rw_flags = 0;
- if (!td->o.odirect && o->uncached)
- sqe->rw_flags |= RWF_UNCACHED;
- if (o->nowait)
- sqe->rw_flags |= RWF_NOWAIT;
-
- sqe->opcode = IORING_OP_URING_CMD;
- sqe->user_data = (unsigned long) io_u;
- if (o->nonvectored)
- sqe->cmd_op = NVME_URING_CMD_IO;
- else
- sqe->cmd_op = NVME_URING_CMD_IO_VEC;
- if (o->force_async && ++ld->prepped == o->force_async) {
- ld->prepped = 0;
- sqe->flags |= IOSQE_ASYNC;
- }
-
- cmd = (struct nvme_uring_cmd *)sqe->cmd;
- ret = fio_nvme_uring_cmd_prep(cmd, io_u,
- o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
+ if (o->registerfiles) {
+ sqe->fd = f->engine_pos;
+ sqe->flags = IOSQE_FIXED_FILE;
+ } else {
+ sqe->fd = f->fd;
+ }
+ sqe->rw_flags = 0;
+ if (!td->o.odirect && o->uncached)
+ sqe->rw_flags |= RWF_UNCACHED;
+ if (o->nowait)
+ sqe->rw_flags |= RWF_NOWAIT;
- return ret;
+ sqe->opcode = IORING_OP_URING_CMD;
+ sqe->user_data = (unsigned long) io_u;
+ if (o->nonvectored)
+ sqe->cmd_op = NVME_URING_CMD_IO;
+ else
+ sqe->cmd_op = NVME_URING_CMD_IO_VEC;
+ if (o->force_async && ++ld->prepped == o->force_async) {
+ ld->prepped = 0;
+ sqe->flags |= IOSQE_ASYNC;
}
- return -EINVAL;
+
+ cmd = (struct nvme_uring_cmd *)sqe->cmd;
+ return fio_nvme_uring_cmd_prep(cmd, io_u,
+ o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
}
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
if (r < 0) {
if (errno == EAGAIN || errno == EINTR)
continue;
+ r = -errno;
td_verror(td, errno, "io_uring_enter");
break;
}
start++;
}
+
+ /*
+ * only used for iolog
+ */
+ if (td->o.read_iolog_file)
+ memcpy(&td->last_issue, &now, sizeof(now));
}
static int fio_ioring_commit(struct thread_data *td)
usleep(1);
continue;
}
+ ret = -errno;
td_verror(td, errno, "io_uring_enter submit");
break;
}
p.flags |= IORING_SETUP_CQSIZE;
p.cq_entries = depth;
+ /*
+ * Set up COOP_TASKRUN since we don't need IPI interrupts for
+ * completing I/O operations.
+ */
+ p.flags |= IORING_SETUP_COOP_TASKRUN;
+
retry:
ret = syscall(__NR_io_uring_setup, depth, &p);
if (ret < 0) {
+ if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
+ p.flags &= ~IORING_SETUP_COOP_TASKRUN;
+ goto retry;
+ }
if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
p.flags &= ~IORING_SETUP_CQSIZE;
goto retry;
static struct ioengine_ops ioengine_uring = {
.name = "io_uring",
.version = FIO_IOOPS_VERSION,
- .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD,
+ .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
+ FIO_ASYNCIO_SETS_ISSUE_TIME,
.init = fio_ioring_init,
.post_init = fio_ioring_post_init,
.io_u_init = fio_ioring_io_u_init,
static struct ioengine_ops ioengine_uring_cmd = {
.name = "io_uring_cmd",
.version = FIO_IOOPS_VERSION,
- .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO,
+ .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
+ FIO_MEMALIGN | FIO_RAWIO |
+ FIO_ASYNCIO_SETS_ISSUE_TIME,
.init = fio_ioring_init,
.post_init = fio_ioring_cmd_post_init,
.io_u_init = fio_ioring_io_u_init,