4 * IO engine using the new native Linux io_uring interface. See:
6 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
13 #include <sys/resource.h>
16 #include "../lib/pow2.h"
17 #include "../optgroup.h"
18 #include "../lib/memalign.h"
19 #include "../lib/fls.h"
20 #include "../lib/roundup.h"
22 #ifdef ARCH_HAVE_IOURING
24 #include "../lib/types.h"
25 #include "../os/linux/io_uring.h"
33 FIO_URING_CMD_NVME = 1,
40 unsigned *ring_entries;
49 unsigned *ring_entries;
50 struct io_uring_cqe *cqes;
61 struct io_u **io_u_index;
65 struct io_sq_ring sq_ring;
66 struct io_uring_sqe *sqes;
68 unsigned sq_ring_mask;
70 struct io_cq_ring cq_ring;
71 unsigned cq_ring_mask;
78 struct ioring_mmap mmap[3];
80 struct cmdprio cmdprio;
82 struct nvme_dsm_range *dsm;
85 struct ioring_options {
86 struct thread_data *td;
88 struct cmdprio_options cmdprio_options;
89 unsigned int fixedbufs;
90 unsigned int registerfiles;
91 unsigned int sqpoll_thread;
92 unsigned int sqpoll_set;
93 unsigned int sqpoll_cpu;
94 unsigned int nonvectored;
95 unsigned int uncached;
97 unsigned int force_async;
98 enum uring_cmd_type cmd_type;
101 static const int ddir_to_op[2][2] = {
102 { IORING_OP_READV, IORING_OP_READ },
103 { IORING_OP_WRITEV, IORING_OP_WRITE }
106 static const int fixed_ddir_to_op[2] = {
107 IORING_OP_READ_FIXED,
108 IORING_OP_WRITE_FIXED
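/*
 * A quick indexing sketch for the two tables above (not part of the
 * upstream code): ddir_to_op[DDIR_READ][1] yields the non-vectored
 * IORING_OP_READ, ddir_to_op[DDIR_WRITE][0] yields IORING_OP_WRITEV, and
 * with fixedbufs enabled fixed_ddir_to_op[DDIR_WRITE] selects
 * IORING_OP_WRITE_FIXED.
 */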
111 static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
113 struct ioring_options *o = data;
115 o->sqpoll_cpu = *val;
120 static struct fio_option options[] = {
123 .lname = "High Priority",
124 .type = FIO_OPT_STR_SET,
125 .off1 = offsetof(struct ioring_options, hipri),
126 .help = "Use polled IO completions",
127 .category = FIO_OPT_C_ENGINE,
128 .group = FIO_OPT_G_IOURING,
132 .lname = "Fixed (pre-mapped) IO buffers",
133 .type = FIO_OPT_STR_SET,
134 .off1 = offsetof(struct ioring_options, fixedbufs),
135 .help = "Pre-map IO buffers",
136 .category = FIO_OPT_C_ENGINE,
137 .group = FIO_OPT_G_IOURING,
140 .name = "registerfiles",
141 .lname = "Register file set",
142 .type = FIO_OPT_STR_SET,
143 .off1 = offsetof(struct ioring_options, registerfiles),
144 .help = "Pre-open/register files",
145 .category = FIO_OPT_C_ENGINE,
146 .group = FIO_OPT_G_IOURING,
149 .name = "sqthread_poll",
150 .lname = "Kernel SQ thread polling",
151 .type = FIO_OPT_STR_SET,
152 .off1 = offsetof(struct ioring_options, sqpoll_thread),
153 .help = "Offload submission/completion to kernel thread",
154 .category = FIO_OPT_C_ENGINE,
155 .group = FIO_OPT_G_IOURING,
158 .name = "sqthread_poll_cpu",
159 .lname = "SQ Thread Poll CPU",
161 .cb = fio_ioring_sqpoll_cb,
162 .help = "Which CPU to run SQ thread polling on",
163 .category = FIO_OPT_C_ENGINE,
164 .group = FIO_OPT_G_IOURING,
167 .name = "nonvectored",
168 .lname = "Non-vectored",
170 .off1 = offsetof(struct ioring_options, nonvectored),
172 .help = "Use non-vectored read/write commands",
173 .category = FIO_OPT_C_ENGINE,
174 .group = FIO_OPT_G_IOURING,
180 .off1 = offsetof(struct ioring_options, uncached),
181 .help = "Use RWF_UNCACHED for buffered reads/writes",
182 .category = FIO_OPT_C_ENGINE,
183 .group = FIO_OPT_G_IOURING,
187 .lname = "RWF_NOWAIT",
188 .type = FIO_OPT_BOOL,
189 .off1 = offsetof(struct ioring_options, nowait),
190 .help = "Use RWF_NOWAIT for reads/writes",
191 .category = FIO_OPT_C_ENGINE,
192 .group = FIO_OPT_G_IOURING,
195 .name = "force_async",
196 .lname = "Force async",
198 .off1 = offsetof(struct ioring_options, force_async),
199 .help = "Set IOSQE_ASYNC every N requests",
200 .category = FIO_OPT_C_ENGINE,
201 .group = FIO_OPT_G_IOURING,
205 .lname = "Uring cmd type",
207 .off1 = offsetof(struct ioring_options, cmd_type),
208 .help = "Specify uring-cmd type",
212 .oval = FIO_URING_CMD_NVME,
213 .help = "Issue nvme-uring-cmd",
216 .category = FIO_OPT_C_ENGINE,
217 .group = FIO_OPT_G_IOURING,
219 CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),
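/*
 * For reference, a minimal job exercising a few of the options declared
 * above could look like the following (illustrative values only):
 *
 *   [uring]
 *   ioengine=io_uring
 *   iodepth=32
 *   fixedbufs=1
 *   registerfiles=1
 *   sqthread_poll=1
 */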
225 static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
226 unsigned int min_complete, unsigned int flags)
228 #ifdef FIO_ARCH_HAS_SYSCALL
229 return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
230 min_complete, flags, NULL, 0);
232 return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
233 min_complete, flags, NULL, 0);
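/*
 * The engine issues the raw io_uring_enter(2) syscall rather than going
 * through liburing; the trailing NULL, 0 arguments are the optional
 * sigset_t pointer and its size, which this engine never uses.
 */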
237 static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
239 struct ioring_data *ld = td->io_ops_data;
240 struct ioring_options *o = td->eo;
241 struct fio_file *f = io_u->file;
242 struct io_uring_sqe *sqe;
244 sqe = &ld->sqes[io_u->index];
246 if (o->registerfiles) {
247 sqe->fd = f->engine_pos;
248 sqe->flags = IOSQE_FIXED_FILE;
254 if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
256 sqe->opcode = fixed_ddir_to_op[io_u->ddir];
257 sqe->addr = (unsigned long) io_u->xfer_buf;
258 sqe->len = io_u->xfer_buflen;
259 sqe->buf_index = io_u->index;
261 struct iovec *iov = &ld->iovecs[io_u->index];
264 * Update based on actual io_u, requeue could have
267 iov->iov_base = io_u->xfer_buf;
268 iov->iov_len = io_u->xfer_buflen;
270 sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
271 if (o->nonvectored) {
272 sqe->addr = (unsigned long) iov->iov_base;
273 sqe->len = iov->iov_len;
275 sqe->addr = (unsigned long) iov;
280 if (!td->o.odirect && o->uncached)
281 sqe->rw_flags |= RWF_UNCACHED;
283 sqe->rw_flags |= RWF_NOWAIT;
286 * Since io_uring can have a submission context (sqthread_poll)
287 * that is different from the process context, we cannot rely on
288 * the IO priority set by ioprio_set() (option prio/prioclass)
290 * td->ioprio will have the value of the "default prio", so set
291 * this unconditionally. This value might get overridden by
292 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
293 * cmdprio_bssplit is used.
295 sqe->ioprio = td->ioprio;
296 sqe->off = io_u->offset;
297 } else if (ddir_sync(io_u->ddir)) {
299 if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
300 sqe->off = f->first_write;
301 sqe->len = f->last_write - f->first_write;
302 sqe->sync_range_flags = td->o.sync_file_range;
303 sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
308 if (io_u->ddir == DDIR_DATASYNC)
309 sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
310 sqe->opcode = IORING_OP_FSYNC;
314 if (o->force_async && ++ld->prepped == o->force_async) {
316 sqe->flags |= IOSQE_ASYNC;
319 sqe->user_data = (unsigned long) io_u;
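/*
 * The io_u pointer stored in user_data above is recovered verbatim from
 * cqe->user_data in fio_ioring_event()/fio_ioring_cmd_event(), which is
 * what ties a completion back to the request that produced it.
 */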
323 static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
325 struct ioring_data *ld = td->io_ops_data;
326 struct ioring_options *o = td->eo;
327 struct fio_file *f = io_u->file;
328 struct nvme_uring_cmd *cmd;
329 struct io_uring_sqe *sqe;
331 /* only supports nvme_uring_cmd */
332 if (o->cmd_type != FIO_URING_CMD_NVME)
335 if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)
338 sqe = &ld->sqes[(io_u->index) << 1];
340 if (o->registerfiles) {
341 sqe->fd = f->engine_pos;
342 sqe->flags = IOSQE_FIXED_FILE;
347 if (!td->o.odirect && o->uncached)
348 sqe->rw_flags |= RWF_UNCACHED;
350 sqe->rw_flags |= RWF_NOWAIT;
352 sqe->opcode = IORING_OP_URING_CMD;
353 sqe->user_data = (unsigned long) io_u;
355 sqe->cmd_op = NVME_URING_CMD_IO;
357 sqe->cmd_op = NVME_URING_CMD_IO_VEC;
358 if (o->force_async && ++ld->prepped == o->force_async) {
360 sqe->flags |= IOSQE_ASYNC;
363 sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
364 sqe->buf_index = io_u->index;
367 cmd = (struct nvme_uring_cmd *)sqe->cmd;
368 return fio_nvme_uring_cmd_prep(cmd, io_u,
369 o->nonvectored ? NULL : &ld->iovecs[io_u->index],
370 &ld->dsm[io_u->index]);
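/*
 * With IORING_SETUP_SQE128 every submission entry is 128 bytes, i.e. two
 * regular struct io_uring_sqe slots, which is why the SQE above is looked
 * up at (io_u->index) << 1 and why the SQE mapping length is doubled in
 * fio_ioring_mmap().
 */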
373 static struct io_u *fio_ioring_event(struct thread_data *td, int event)
375 struct ioring_data *ld = td->io_ops_data;
376 struct io_uring_cqe *cqe;
380 index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
382 cqe = &ld->cq_ring.cqes[index];
383 io_u = (struct io_u *) (uintptr_t) cqe->user_data;
385 if (cqe->res != io_u->xfer_buflen) {
386 if (cqe->res > io_u->xfer_buflen)
387 io_u->error = -cqe->res;
389 io_u->resid = io_u->xfer_buflen - cqe->res;
396 static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
398 struct ioring_data *ld = td->io_ops_data;
399 struct ioring_options *o = td->eo;
400 struct io_uring_cqe *cqe;
404 index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
405 if (o->cmd_type == FIO_URING_CMD_NVME)
408 cqe = &ld->cq_ring.cqes[index];
409 io_u = (struct io_u *) (uintptr_t) cqe->user_data;
412 io_u->error = -cqe->res;
419 static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
422 struct ioring_data *ld = td->io_ops_data;
423 struct io_cq_ring *ring = &ld->cq_ring;
424 unsigned head, reaped = 0;
428 if (head == atomic_load_acquire(ring->tail))
432 } while (reaped + events < max);
435 atomic_store_release(ring->head, head);
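/*
 * Completion reaping follows the usual io_uring ring protocol: load the
 * tail with acquire semantics, consume entries up to it, then publish the
 * new head with a release store so the kernel may reuse those CQE slots.
 */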
440 static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
441 unsigned int max, const struct timespec *t)
443 struct ioring_data *ld = td->io_ops_data;
444 unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
445 struct ioring_options *o = td->eo;
446 struct io_cq_ring *ring = &ld->cq_ring;
450 ld->cq_ring_off = *ring->head;
452 r = fio_ioring_cqring_reap(td, events, max);
461 if (!o->sqpoll_thread) {
462 r = io_uring_enter(ld, 0, actual_min,
463 IORING_ENTER_GETEVENTS);
465 if (errno == EAGAIN || errno == EINTR)
468 td_verror(td, errno, "io_uring_enter");
472 } while (events < min);
474 return r < 0 ? r : events;
477 static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
480 struct ioring_data *ld = td->io_ops_data;
481 struct cmdprio *cmdprio = &ld->cmdprio;
483 if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
484 ld->sqes[io_u->index].ioprio = io_u->ioprio;
487 static enum fio_q_status fio_ioring_queue(struct thread_data *td,
490 struct ioring_data *ld = td->io_ops_data;
491 struct io_sq_ring *ring = &ld->sq_ring;
492 unsigned tail, next_tail;
494 fio_ro_check(td, io_u);
496 if (ld->queued == ld->iodepth)
499 if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
503 do_io_u_trim(td, io_u);
505 io_u_mark_submit(td, 1);
506 io_u_mark_complete(td, 1);
507 return FIO_Q_COMPLETED;
511 next_tail = tail + 1;
512 if (next_tail == atomic_load_acquire(ring->head))
515 if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
516 fio_ioring_cmdprio_prep(td, io_u);
518 ring->array[tail & ld->sq_ring_mask] = io_u->index;
519 atomic_store_release(ring->tail, next_tail);
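/*
 * Mirror image of the CQ side: the io_u index is written into the SQ
 * array slot first, and only then is the new tail published with a
 * release store, so the kernel (or SQPOLL thread) always observes a fully
 * initialised entry.
 */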
525 static void fio_ioring_queued(struct thread_data *td, int start, int nr)
527 struct ioring_data *ld = td->io_ops_data;
530 if (!fio_fill_issue_time(td))
533 fio_gettime(&now, NULL);
536 struct io_sq_ring *ring = &ld->sq_ring;
537 int index = ring->array[start & ld->sq_ring_mask];
538 struct io_u *io_u = ld->io_u_index[index];
540 memcpy(&io_u->issue_time, &now, sizeof(now));
541 io_u_queued(td, io_u);
547 * only used for iolog
549 if (td->o.read_iolog_file)
550 memcpy(&td->last_issue, &now, sizeof(now));
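/*
 * Because the engine advertises FIO_ASYNCIO_SETS_ISSUE_TIME, it stamps
 * issue_time itself; that happens here, once the batch has been submitted
 * (or handed to the SQPOLL thread), rather than when each io_u was queued.
 */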
553 static int fio_ioring_commit(struct thread_data *td)
555 struct ioring_data *ld = td->io_ops_data;
556 struct ioring_options *o = td->eo;
563 * Kernel side does submission. We just need to check if the ring is
564 * flagged as needing a kick; if so, call io_uring_enter(). This
565 * only happens if we've been idle too long.
567 if (o->sqpoll_thread) {
568 struct io_sq_ring *ring = &ld->sq_ring;
569 unsigned start = *ld->sq_ring.tail - ld->queued;
572 flags = atomic_load_acquire(ring->flags);
573 if (flags & IORING_SQ_NEED_WAKEUP)
574 io_uring_enter(ld, ld->queued, 0,
575 IORING_ENTER_SQ_WAKEUP);
576 fio_ioring_queued(td, start, ld->queued);
577 io_u_mark_submit(td, ld->queued);
584 unsigned start = *ld->sq_ring.head;
585 long nr = ld->queued;
587 ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
589 fio_ioring_queued(td, start, ret);
590 io_u_mark_submit(td, ret);
595 io_u_mark_submit(td, ret);
598 if (errno == EAGAIN || errno == EINTR) {
599 ret = fio_ioring_cqring_reap(td, 0, ld->queued);
602 /* Shouldn't happen */
607 td_verror(td, errno, "io_uring_enter submit");
610 } while (ld->queued);
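/*
 * Two submission paths meet above: with sqpoll_thread the kernel thread
 * drains the SQ ring on its own and io_uring_enter() is only issued as a
 * wakeup, while otherwise io_uring_enter() is called in a loop until
 * everything queued so far has been submitted.
 */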
615 static void fio_ioring_unmap(struct ioring_data *ld)
619 for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
620 munmap(ld->mmap[i].ptr, ld->mmap[i].len);
624 static void fio_ioring_cleanup(struct thread_data *td)
626 struct ioring_data *ld = td->io_ops_data;
629 if (!(td->flags & TD_F_CHILD))
630 fio_ioring_unmap(ld);
632 fio_cmdprio_cleanup(&ld->cmdprio);
633 free(ld->io_u_index);
641 static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
643 struct io_sq_ring *sring = &ld->sq_ring;
644 struct io_cq_ring *cring = &ld->cq_ring;
647 ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
648 ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
649 MAP_SHARED | MAP_POPULATE, ld->ring_fd,
651 ld->mmap[0].ptr = ptr;
652 sring->head = ptr + p->sq_off.head;
653 sring->tail = ptr + p->sq_off.tail;
654 sring->ring_mask = ptr + p->sq_off.ring_mask;
655 sring->ring_entries = ptr + p->sq_off.ring_entries;
656 sring->flags = ptr + p->sq_off.flags;
657 sring->array = ptr + p->sq_off.array;
658 ld->sq_ring_mask = *sring->ring_mask;
660 if (p->flags & IORING_SETUP_SQE128)
661 ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
663 ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
664 ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
665 MAP_SHARED | MAP_POPULATE, ld->ring_fd,
667 ld->mmap[1].ptr = ld->sqes;
669 if (p->flags & IORING_SETUP_CQE32) {
670 ld->mmap[2].len = p->cq_off.cqes +
671 2 * p->cq_entries * sizeof(struct io_uring_cqe);
673 ld->mmap[2].len = p->cq_off.cqes +
674 p->cq_entries * sizeof(struct io_uring_cqe);
676 ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
677 MAP_SHARED | MAP_POPULATE, ld->ring_fd,
679 ld->mmap[2].ptr = ptr;
680 cring->head = ptr + p->cq_off.head;
681 cring->tail = ptr + p->cq_off.tail;
682 cring->ring_mask = ptr + p->cq_off.ring_mask;
683 cring->ring_entries = ptr + p->cq_off.ring_entries;
684 cring->cqes = ptr + p->cq_off.cqes;
685 ld->cq_ring_mask = *cring->ring_mask;
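/*
 * Three mappings make up the ring: the SQ ring metadata plus index array,
 * the SQE array itself, and the CQ ring with its CQEs. Their lengths are
 * remembered in ld->mmap[] so fio_ioring_unmap() can tear them down again.
 */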
689 static void fio_ioring_probe(struct thread_data *td)
691 struct ioring_data *ld = td->io_ops_data;
692 struct ioring_options *o = td->eo;
693 struct io_uring_probe *p;
696 /* already set by user, don't touch */
697 if (o->nonvectored != -1)
700 /* default to off, as that's always safe */
703 p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
707 ret = syscall(__NR_io_uring_register, ld->ring_fd,
708 IORING_REGISTER_PROBE, p, 256);
712 if (IORING_OP_WRITE > p->ops_len)
715 if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
716 (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
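/*
 * The probe above only needs to answer one question: are the non-vectored
 * IORING_OP_READ/IORING_OP_WRITE opcodes supported, so that nonvectored
 * can be enabled by default on kernels that provide them.
 */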
722 static int fio_ioring_queue_init(struct thread_data *td)
724 struct ioring_data *ld = td->io_ops_data;
725 struct ioring_options *o = td->eo;
726 int depth = td->o.iodepth;
727 struct io_uring_params p;
730 memset(&p, 0, sizeof(p));
733 p.flags |= IORING_SETUP_IOPOLL;
734 if (o->sqpoll_thread) {
735 p.flags |= IORING_SETUP_SQPOLL;
737 p.flags |= IORING_SETUP_SQ_AFF;
738 p.sq_thread_cpu = o->sqpoll_cpu;
742 * Submission latency for sqpoll_thread is just the time it
743 * takes to fill in the SQ ring entries, plus any syscall made when
744 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time
747 td->o.disable_slat = 1;
751 * Clamp CQ ring size at our SQ ring size; we don't need more entries
754 p.flags |= IORING_SETUP_CQSIZE;
755 p.cq_entries = depth;
758 * Set up COOP_TASKRUN, as we don't need to be interrupted by an IPI for
759 * completing IO operations.
761 p.flags |= IORING_SETUP_COOP_TASKRUN;
764 * io_uring is always a single issuer, and we can defer task_work
765 * runs until we reap events.
767 p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
770 ret = syscall(__NR_io_uring_setup, depth, &p);
772 if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
773 p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
774 p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
777 if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
778 p.flags &= ~IORING_SETUP_COOP_TASKRUN;
781 if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
782 p.flags &= ~IORING_SETUP_CQSIZE;
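/*
 * The EINVAL handling above walks back the optional setup flags one group
 * at a time (DEFER_TASKRUN/SINGLE_ISSUER, then COOP_TASKRUN, then CQSIZE)
 * and retries, so the engine still comes up on older kernels that predate
 * those flags.
 */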
790 fio_ioring_probe(td);
793 ret = syscall(__NR_io_uring_register, ld->ring_fd,
794 IORING_REGISTER_BUFFERS, ld->iovecs, depth);
799 return fio_ioring_mmap(ld, &p);
802 static int fio_ioring_cmd_queue_init(struct thread_data *td)
804 struct ioring_data *ld = td->io_ops_data;
805 struct ioring_options *o = td->eo;
806 int depth = td->o.iodepth;
807 struct io_uring_params p;
810 memset(&p, 0, sizeof(p));
813 p.flags |= IORING_SETUP_IOPOLL;
814 if (o->sqpoll_thread) {
815 p.flags |= IORING_SETUP_SQPOLL;
817 p.flags |= IORING_SETUP_SQ_AFF;
818 p.sq_thread_cpu = o->sqpoll_cpu;
822 * Submission latency for sqpoll_thread is just the time it
823 * takes to fill in the SQ ring entries, plus any syscall made when
824 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time
827 td->o.disable_slat = 1;
829 if (o->cmd_type == FIO_URING_CMD_NVME) {
830 p.flags |= IORING_SETUP_SQE128;
831 p.flags |= IORING_SETUP_CQE32;
835 * Clamp CQ ring size at our SQ ring size; we don't need more entries
838 p.flags |= IORING_SETUP_CQSIZE;
839 p.cq_entries = depth;
842 * Set up COOP_TASKRUN, as we don't need to be interrupted by an IPI for
843 * completing IO operations.
845 p.flags |= IORING_SETUP_COOP_TASKRUN;
848 * io_uring is always a single issuer, and we can defer task_work
849 * runs until we reap events.
851 p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
854 ret = syscall(__NR_io_uring_setup, depth, &p);
856 if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
857 p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
858 p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
861 if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
862 p.flags &= ~IORING_SETUP_COOP_TASKRUN;
865 if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
866 p.flags &= ~IORING_SETUP_CQSIZE;
874 fio_ioring_probe(td);
877 ret = syscall(__NR_io_uring_register, ld->ring_fd,
878 IORING_REGISTER_BUFFERS, ld->iovecs, depth);
883 return fio_ioring_mmap(ld, &p);
886 static int fio_ioring_register_files(struct thread_data *td)
888 struct ioring_data *ld = td->io_ops_data;
893 ld->fds = calloc(td->o.nr_files, sizeof(int));
895 for_each_file(td, f, i) {
896 ret = generic_open_file(td, f);
903 ret = syscall(__NR_io_uring_register, ld->ring_fd,
904 IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
912 * Pretend the file is closed again, and really close it if we hit
915 for_each_file(td, f, i) {
918 ret2 = generic_close_file(td, f);
926 static int fio_ioring_post_init(struct thread_data *td)
928 struct ioring_data *ld = td->io_ops_data;
929 struct ioring_options *o = td->eo;
933 for (i = 0; i < td->o.iodepth; i++) {
934 struct iovec *iov = &ld->iovecs[i];
936 io_u = ld->io_u_index[i];
937 iov->iov_base = io_u->buf;
938 iov->iov_len = td_max_bs(td);
941 err = fio_ioring_queue_init(td);
943 int init_err = errno;
945 if (init_err == ENOSYS)
946 log_err("fio: your kernel doesn't support io_uring\n");
947 td_verror(td, init_err, "io_queue_init");
951 for (i = 0; i < td->o.iodepth; i++) {
952 struct io_uring_sqe *sqe;
955 memset(sqe, 0, sizeof(*sqe));
958 if (o->registerfiles) {
959 err = fio_ioring_register_files(td);
961 td_verror(td, errno, "ioring_register_files");
969 static int fio_ioring_cmd_post_init(struct thread_data *td)
971 struct ioring_data *ld = td->io_ops_data;
972 struct ioring_options *o = td->eo;
976 for (i = 0; i < td->o.iodepth; i++) {
977 struct iovec *iov = &ld->iovecs[i];
979 io_u = ld->io_u_index[i];
980 iov->iov_base = io_u->buf;
981 iov->iov_len = td_max_bs(td);
984 err = fio_ioring_cmd_queue_init(td);
986 int init_err = errno;
988 td_verror(td, init_err, "io_queue_init");
992 for (i = 0; i < td->o.iodepth; i++) {
993 struct io_uring_sqe *sqe;
995 if (o->cmd_type == FIO_URING_CMD_NVME) {
996 sqe = &ld->sqes[i << 1];
997 memset(sqe, 0, 2 * sizeof(*sqe));
1000 memset(sqe, 0, sizeof(*sqe));
1004 if (o->registerfiles) {
1005 err = fio_ioring_register_files(td);
1007 td_verror(td, errno, "ioring_register_files");
1015 static int fio_ioring_init(struct thread_data *td)
1017 struct ioring_options *o = td->eo;
1018 struct ioring_data *ld;
1021 /* sqthread submission requires registered files */
1022 if (o->sqpoll_thread)
1023 o->registerfiles = 1;
1025 if (o->registerfiles && td->o.nr_files != td->o.open_files) {
1026 log_err("fio: io_uring registered files require nr_files to "
1027 "be identical to open_files\n");
1031 ld = calloc(1, sizeof(*ld));
1033 /* ring depth must be a power-of-2 */
1034 ld->iodepth = td->o.iodepth;
1035 td->o.iodepth = roundup_pow2(td->o.iodepth);
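/*
 * io_uring requires the ring size to be a power of two; the caller's
 * original iodepth is preserved in ld->iodepth for accounting, while the
 * rounded-up value is what the rings are sized with.
 */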
1038 ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
1039 ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));
1041 td->io_ops_data = ld;
1043 ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
1045 td_verror(td, EINVAL, "fio_ioring_init");
1050 * For io_uring_cmd, trims are async operations unless we are operating
1051 * in zbd mode where trim means zone reset.
1053 if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
1054 td->o.zone_mode == ZONE_MODE_ZBD)
1055 td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;
1057 ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
1062 static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
1064 struct ioring_data *ld = td->io_ops_data;
1066 ld->io_u_index[io_u->index] = io_u;
1070 static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
1072 struct ioring_data *ld = td->io_ops_data;
1073 struct ioring_options *o = td->eo;
1075 if (!ld || !o->registerfiles)
1076 return generic_open_file(td, f);
1078 f->fd = ld->fds[f->engine_pos];
1082 static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
1084 struct ioring_data *ld = td->io_ops_data;
1085 struct ioring_options *o = td->eo;
1087 if (o->cmd_type == FIO_URING_CMD_NVME) {
1088 struct nvme_data *data = NULL;
1089 unsigned int nsid, lba_size = 0;
1094 /* Store the namespace-id and lba size. */
1095 data = FILE_ENG_DATA(f);
1097 ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
1101 data = calloc(1, sizeof(struct nvme_data));
1104 data->lba_ext = lba_size + ms;
1106 data->lba_shift = ilog2(lba_size);
1108 FILE_SET_ENG_DATA(f, data);
1111 assert(data->lba_shift < 32);
1112 lba_size = data->lba_ext ? data->lba_ext : (1U << data->lba_shift);
1114 for_each_rw_ddir(ddir) {
1115 if (td->o.min_bs[ddir] % lba_size ||
1116 td->o.max_bs[ddir] % lba_size) {
1118 log_err("block size must be a multiple of "
1119 "(LBA data size + Metadata size)\n");
1121 log_err("block size must be a multiple of LBA data size\n");
1126 if (!ld || !o->registerfiles)
1127 return generic_open_file(td, f);
1129 f->fd = ld->fds[f->engine_pos];
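/*
 * In the NVMe branch above, the job's min/max block sizes are validated
 * against the formatted LBA size (or LBA data size plus metadata size when
 * extended LBAs are in use), since uring_cmd I/O is issued in whole
 * logical blocks.
 */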
1133 static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
1135 struct ioring_data *ld = td->io_ops_data;
1136 struct ioring_options *o = td->eo;
1138 if (!ld || !o->registerfiles)
1139 return generic_close_file(td, f);
1145 static int fio_ioring_cmd_close_file(struct thread_data *td,
1148 struct ioring_data *ld = td->io_ops_data;
1149 struct ioring_options *o = td->eo;
1151 if (o->cmd_type == FIO_URING_CMD_NVME) {
1152 struct nvme_data *data = FILE_ENG_DATA(f);
1154 FILE_SET_ENG_DATA(f, NULL);
1157 if (!ld || !o->registerfiles)
1158 return generic_close_file(td, f);
1164 static int fio_ioring_cmd_get_file_size(struct thread_data *td,
1167 struct ioring_options *o = td->eo;
1169 if (fio_file_size_known(f))
1172 if (o->cmd_type == FIO_URING_CMD_NVME) {
1173 struct nvme_data *data = NULL;
1174 unsigned int nsid, lba_size = 0;
1179 ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
1183 data = calloc(1, sizeof(struct nvme_data));
1186 data->lba_ext = lba_size + ms;
1188 data->lba_shift = ilog2(lba_size);
1190 f->real_file_size = lba_size * nlba;
1191 fio_file_set_size_known(f);
1193 FILE_SET_ENG_DATA(f, data);
1196 return generic_get_file_size(td, f);
1199 static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
1201 enum zbd_zoned_model *model)
1203 return fio_nvme_get_zoned_model(td, f, model);
1206 static int fio_ioring_cmd_report_zones(struct thread_data *td,
1207 struct fio_file *f, uint64_t offset,
1208 struct zbd_zone *zbdz,
1209 unsigned int nr_zones)
1211 return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
1214 static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
1215 uint64_t offset, uint64_t length)
1217 return fio_nvme_reset_wp(td, f, offset, length);
1220 static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
1222 unsigned int *max_open_zones)
1224 return fio_nvme_get_max_open_zones(td, f, max_open_zones);
1227 static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
1228 struct fio_ruhs_info *fruhs_info)
1230 struct nvme_fdp_ruh_status *ruhs;
1233 bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
1234 ruhs = scalloc(1, bytes);
1238 ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
1242 fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
1243 for (i = 0; i < fruhs_info->nr_ruhs; i++)
1244 fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
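/*
 * FDP support: the namespace is queried for its reclaim unit handle status
 * and the placement identifiers are copied into fruhs_info; fio's data
 * placement logic can then use them when directing writes.
 */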
1250 static struct ioengine_ops ioengine_uring = {
1252 .version = FIO_IOOPS_VERSION,
1253 .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
1254 FIO_ASYNCIO_SETS_ISSUE_TIME,
1255 .init = fio_ioring_init,
1256 .post_init = fio_ioring_post_init,
1257 .io_u_init = fio_ioring_io_u_init,
1258 .prep = fio_ioring_prep,
1259 .queue = fio_ioring_queue,
1260 .commit = fio_ioring_commit,
1261 .getevents = fio_ioring_getevents,
1262 .event = fio_ioring_event,
1263 .cleanup = fio_ioring_cleanup,
1264 .open_file = fio_ioring_open_file,
1265 .close_file = fio_ioring_close_file,
1266 .get_file_size = generic_get_file_size,
1268 .option_struct_size = sizeof(struct ioring_options),
1271 static struct ioengine_ops ioengine_uring_cmd = {
1272 .name = "io_uring_cmd",
1273 .version = FIO_IOOPS_VERSION,
1274 .flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
1275 FIO_ASYNCIO_SETS_ISSUE_TIME,
1276 .init = fio_ioring_init,
1277 .post_init = fio_ioring_cmd_post_init,
1278 .io_u_init = fio_ioring_io_u_init,
1279 .prep = fio_ioring_cmd_prep,
1280 .queue = fio_ioring_queue,
1281 .commit = fio_ioring_commit,
1282 .getevents = fio_ioring_getevents,
1283 .event = fio_ioring_cmd_event,
1284 .cleanup = fio_ioring_cleanup,
1285 .open_file = fio_ioring_cmd_open_file,
1286 .close_file = fio_ioring_cmd_close_file,
1287 .get_file_size = fio_ioring_cmd_get_file_size,
1288 .get_zoned_model = fio_ioring_cmd_get_zoned_model,
1289 .report_zones = fio_ioring_cmd_report_zones,
1290 .reset_wp = fio_ioring_cmd_reset_wp,
1291 .get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
1293 .option_struct_size = sizeof(struct ioring_options),
1294 .fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
1297 static void fio_init fio_ioring_register(void)
1299 register_ioengine(&ioengine_uring);
1300 register_ioengine(&ioengine_uring_cmd);
1303 static void fio_exit fio_ioring_unregister(void)
1305 unregister_ioengine(&ioengine_uring);
1306 unregister_ioengine(&ioengine_uring_cmd);
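/*
 * The fio_init constructor registers both engines when fio starts (and the
 * fio_exit destructor unregisters them on exit), so "ioengine=io_uring"
 * and "ioengine=io_uring_cmd" resolve without any explicit plugin loading.
 */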