/*
 * IO engine using the new native Linux io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
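/*
 * Illustrative only: a minimal job file that exercises this engine. The
 * option names below are the ones registered further down in this file
 * (plus standard fio options); the device path and sizes are placeholders.
 *
 *	[uring-test]
 *	ioengine=io_uring
 *	filename=/dev/nvme0n1
 *	direct=1
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 *	sqthread_poll=1
 *	registerfiles=1
 *	fixedbufs=1
 */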
#include <sys/resource.h>

#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
	FIO_URING_CMD_NVME = 1,

	unsigned *ring_entries;

	unsigned *ring_entries;
	struct io_uring_cqe *cqes;

	struct io_u **io_u_index;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;
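/*
 * Note on the per-thread engine data above: sqes points at the mmap'ed SQE
 * array and is indexed by io_u->index, sq_ring/cq_ring hold pointers into
 * the rings shared with the kernel, and mmap[3] records the three mappings
 * (SQ ring, SQE array, CQ ring) so they can be munmap'ed at cleanup.
 */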
struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
	enum uring_cmd_type cmd_type;
};
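/*
 * Opcode lookup tables used by fio_ioring_prep(): ddir_to_op is indexed by
 * data direction (read/write) and by whether non-vectored commands were
 * requested; fixed_ddir_to_op selects the *_FIXED variants used with
 * pre-registered (fixedbufs) buffers.
 */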
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}
static struct fio_option options[] = {
	{
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.percentage[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.percentage[DDIR_WRITE]),
		.help	= "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.class[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.class[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority class",
		.minval	= IOPRIO_MIN_PRIO_CLASS + 1,
		.maxval	= IOPRIO_MAX_PRIO_CLASS,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.lname	= "Asynchronous I/O priority level",
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.level[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.level[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority level",
		.minval	= IOPRIO_MIN_PRIO,
		.maxval	= IOPRIO_MAX_PRIO,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_STR_STORE,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.bssplit_str),
		.help	= "Set priority percentages for different block sizes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#else
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
#endif
	{
		.lname	= "Fixed (pre-mapped) IO buffers",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, fixedbufs),
		.help	= "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "registerfiles",
		.lname	= "Register file set",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, registerfiles),
		.help	= "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll",
		.lname	= "Kernel SQ thread polling",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, sqpoll_thread),
		.help	= "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll_cpu",
		.lname	= "SQ Thread Poll CPU",
		.cb	= fio_ioring_sqpoll_cb,
		.help	= "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nonvectored",
		.lname	= "Non-vectored",
		.off1	= offsetof(struct ioring_options, nonvectored),
		.help	= "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.off1	= offsetof(struct ioring_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, nowait),
		.help	= "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "force_async",
		.lname	= "Force async",
		.off1	= offsetof(struct ioring_options, force_async),
		.help	= "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.lname	= "Uring cmd type",
		.off1	= offsetof(struct ioring_options, cmd_type),
		.help	= "Specify uring-cmd type",
		.oval	= FIO_URING_CMD_NVME,
		.help	= "Issue nvme-uring-cmd",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= NULL,
	},
};
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
			     min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
		       min_complete, flags, NULL, 0);
#endif
}
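/*
 * The engine calls io_uring_enter(2) directly rather than going through
 * liburing. to_submit tells the kernel how many new SQEs to consume; with
 * IORING_ENTER_GETEVENTS set, the call also waits until at least
 * min_complete completions are available in the CQ ring.
 */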
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these.
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}

		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (option prio/prioclass)
		 * to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}
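/*
 * The net effect of fio_ioring_prep() for a plain non-vectored read is an
 * SQE along the lines of: opcode = IORING_OP_READ, fd (or the fixed file
 * index), addr/len describing the buffer, off = io_u->offset, and
 * user_data pointing back at the io_u so the completion path can find it.
 */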
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}

	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}
	if (o->fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;
	}

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
}
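/*
 * For NVMe passthrough the ring is created with IORING_SETUP_SQE128 (see
 * fio_ioring_cmd_queue_init() below), so each io_u owns a 128-byte SQE
 * slot, hence the (io_u->index) << 1 indexing into the regular 64-byte SQE
 * array. The nvme_uring_cmd payload itself is built into sqe->cmd by
 * fio_nvme_uring_cmd_prep().
 */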
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	}

	return io_u;
}
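/*
 * cqe->res follows the usual io_uring convention: a negative errno on
 * failure, otherwise the number of bytes transferred. A short transfer is
 * reported back to fio as a residual rather than an error.
 */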
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != 0)
		io_u->error = -cqe->res;

	return io_u;
}
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	atomic_store_release(ring->head, head);
	return reaped;
}
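/*
 * The acquire load of the CQ tail pairs with the kernel's release store
 * when it posts completions; the release store of the head publishes the
 * consumed entries back to the kernel. ld->cq_ring_off (the head value at
 * the start of a reap cycle) is what fio_ioring_event() adds to the event
 * index to locate the right CQE.
 */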
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			max -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
}
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}
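/*
 * Queueing is split in two: fio_ioring_prep() already filled the SQE for
 * this io_u, so queue only has to publish the slot index in the SQ ring
 * array and advance the tail with a release store. The actual submission
 * happens in fio_ioring_commit(), or is picked up by the SQPOLL kernel
 * thread when sqthread_poll is used.
 */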
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
}
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	/*
	 * Kernel side does submission. Just need to check if the ring is
	 * flagged as needing a kick, if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.head;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);

		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);
			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}
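/*
 * With sqthread_poll the kernel-side SQPOLL thread picks up new SQEs on
 * its own; userspace only needs the IORING_ENTER_SQ_WAKEUP nudge when the
 * ring is flagged IORING_SQ_NEED_WAKEUP after the thread has gone idle.
 * Without SQPOLL, commit loops on io_uring_enter() until everything that
 * was queued has been submitted.
 */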
static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (!(td->flags & TD_F_CHILD))
		fio_ioring_unmap(ld);

	fio_cmdprio_cleanup(&ld->cmdprio);
	free(ld->io_u_index);
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
					2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
					p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}
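/*
 * Three mappings are kept per ring: mmap[0] is the SQ ring proper (indices,
 * flags and the index array), mmap[1] is the SQE array, and mmap[2] is the
 * CQ ring including the CQEs themselves. IORING_SETUP_SQE128 and
 * IORING_SETUP_CQE32 simply double the respective entry sizes for the
 * passthrough engine.
 */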
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}
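/*
 * The probe asks the kernel which opcodes it supports; the plain
 * (non-vectored) IORING_OP_READ/IORING_OP_WRITE only exist on newer
 * kernels, so nonvectored stays off unless the probe confirms both are
 * available or the user forced it on the command line.
 */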
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, and any syscall if
		 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
		 * as the submission latency.
		 */
		td->o.disable_slat = 1;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
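/*
 * The EINVAL fallbacks above exist because the setup flags were added over
 * several kernel releases: DEFER_TASKRUN/SINGLE_ISSUER, COOP_TASKRUN and
 * even CQSIZE are stripped one by one and the setup retried, so the engine
 * still works on older kernels, just without those optimizations.
 */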
static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, and any syscall if
		 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
		 * as the submission latency.
		 */
		td->o.disable_slat = 1;
	}
	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			break;
		ld->fds[f->engine_pos] = f->fd;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		int ret2;

		ret2 = generic_close_file(td, f);
static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static int fio_ioring_cmd_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	return 0;
}
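/*
 * td->o.iodepth is rounded up to a power of two because the SQ/CQ ring
 * sizes must be powers of two; ld->iodepth remembers the originally
 * requested depth so fio_ioring_queue() can still cap queueing at the
 * user's value.
 */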
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}

static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		unsigned long long nlba = 0;
		int ret;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		if (data == NULL) {
			ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
			if (ret)
				return ret;

			data = calloc(1, sizeof(struct nvme_data));
			data->nsid = nsid;
			data->lba_shift = ilog2(lba_size);

			FILE_SET_ENG_DATA(f, data);
		}
	}
	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}

static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);
		free(data);
	}
	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
{
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		unsigned long long nlba = 0;
		int ret;

		ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
		if (ret)
			return ret;

		data = calloc(1, sizeof(struct nvme_data));
		data->nsid = nsid;
		data->lba_shift = ilog2(lba_size);

		f->real_file_size = lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);
		return 0;
	}

	return generic_get_file_size(td, f);
}
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					   struct fio_file *f,
					   enum zbd_zoned_model *model)
{
	return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
				       struct fio_file *f, uint64_t offset,
				       struct zbd_zone *zbdz,
				       unsigned int nr_zones)
{
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
{
	return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					      struct fio_file *f,
					      unsigned int *max_open_zones)
{
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
static struct ioengine_ops ioengine_uring = {
	.name			= "io_uring",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_open_file,
	.close_file		= fio_ioring_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};
static struct ioengine_ops ioengine_uring_cmd = {
	.name			= "io_uring_cmd",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_MEMALIGN | FIO_RAWIO |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_cmd_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_cmd_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_cmd_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_cmd_open_file,
	.close_file		= fio_ioring_cmd_close_file,
	.get_file_size		= fio_ioring_cmd_get_file_size,
	.get_zoned_model	= fio_ioring_cmd_get_zoned_model,
	.report_zones		= fio_ioring_cmd_report_zones,
	.reset_wp		= fio_ioring_cmd_reset_wp,
	.get_max_open_zones	= fio_ioring_cmd_get_max_open_zones,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};
static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);
}