/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/uio.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
#include "nvme.h"
enum uring_cmd_type {
	FIO_URING_CMD_NVME = 1,
};
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};
struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};
struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	int prepped;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;
};
struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
	enum uring_cmd_type cmd_type;
};
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};
static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}
static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.percentage[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.percentage[DDIR_WRITE]),
		.minval	= 0,
		.maxval	= 100,
		.help	= "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.class[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.class[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority class",
		.minval	= IOPRIO_MIN_PRIO_CLASS + 1,
		.maxval	= IOPRIO_MAX_PRIO_CLASS,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio",
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.level[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.level[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority level",
		.minval	= IOPRIO_MIN_PRIO,
		.maxval	= IOPRIO_MAX_PRIO,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_STR_STORE,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.bssplit_str),
		.help	= "Set priority percentages for different block sizes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#else
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio",
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name	= "fixedbufs",
		.lname	= "Fixed (pre-mapped) IO buffers",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, fixedbufs),
		.help	= "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "registerfiles",
		.lname	= "Register file set",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, registerfiles),
		.help	= "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll",
		.lname	= "Kernel SQ thread polling",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, sqpoll_thread),
		.help	= "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll_cpu",
		.lname	= "SQ Thread Poll CPU",
		.type	= FIO_OPT_INT,
		.cb	= fio_ioring_sqpoll_cb,
		.help	= "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nonvectored",
		.lname	= "Non-vectored",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, nonvectored),
		.def	= "-1",
		.help	= "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "uncached",
		.lname	= "Uncached",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nowait",
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, nowait),
		.help	= "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "force_async",
		.lname	= "Force async",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, force_async),
		.help	= "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmd_type",
		.lname	= "Uring cmd type",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct ioring_options, cmd_type),
		.help	= "Specify uring-cmd type",
		.def	= "nvme",
		.posval = {
			  { .ival = "nvme",
			    .oval = FIO_URING_CMD_NVME,
			    .help = "Issue nvme-uring-cmd",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= NULL,
	},
};
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}
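
/*
 * Note that the engine drives the ring through raw syscalls
 * (__NR_io_uring_setup, __NR_io_uring_enter, __NR_io_uring_register)
 * rather than linking against liburing; fio maps and manages the SQ/CQ
 * ring memory itself in fio_ioring_mmap() below.
 */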
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		sqe->rw_flags = 0;
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (option prio/prioclass)
		 * to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->off = 0;
			sqe->addr = 0;
			sqe->len = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}
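
/*
 * A minimal example job driving this prep path (a sketch; any of the
 * engine options defined in options[] above can be added the same way,
 * and /dev/nvme0n1 is just a placeholder target):
 *
 *   fio --name=rand-read --ioengine=io_uring --rw=randread --bs=4k \
 *       --iodepth=32 --registerfiles=1 --fixedbufs=1 \
 *       --filename=/dev/nvme0n1
 */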
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}
	sqe->rw_flags = 0;
	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
}
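
/*
 * For NVMe passthrough the ring is created with IORING_SETUP_SQE128 and
 * IORING_SETUP_CQE32 (see fio_ioring_cmd_queue_init()), so each command
 * occupies two regular 64-byte SQE slots. That is why the prep above
 * indexes the sqes[] array with (io_u->index) << 1 and why
 * fio_ioring_mmap() doubles the SQE/CQE mapping sizes.
 */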
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	}

	return io_u;
}
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != 0)
		io_u->error = -cqe->res;
	else
		io_u->error = 0;

	return io_u;
}
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				   unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}
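
/*
 * The CQ ring is consumed lock-free: the kernel publishes new completions
 * by advancing the tail with release semantics, which pairs with the
 * acquire load above, and the consumed head is only published back to the
 * kernel with a release store once the entries have been read.
 */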
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				r = -errno;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
}
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}
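
/*
 * Note that trims never enter the ring here: once any queued IO has
 * drained, do_io_u_trim() above issues the discard synchronously and the
 * io_u is marked submitted and completed in one step, matching the
 * FIO_ASYNCIO_SYNC_TRIM flag set on the engine ops below.
 */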
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
}
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * Kernel side does submission. just need to check if the ring is
	 * flagged as needing a kick, if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			ret = -errno;
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}
static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}
static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
					2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
					p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}
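
/*
 * The three mappings above mirror the kernel's io_uring layout: mmap[0]
 * is the SQ ring metadata plus the SQE index array (IORING_OFF_SQ_RING),
 * mmap[1] is the SQE array itself (IORING_OFF_SQES), and mmap[2] is the
 * CQ ring together with its CQE array (IORING_OFF_CQ_RING).
 */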
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = malloc(sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	memset(p, 0, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}
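
/*
 * The probe above asks the kernel (IORING_REGISTER_PROBE) whether the
 * non-vectored IORING_OP_READ/IORING_OP_WRITE opcodes are supported and
 * only then enables nonvectored by default; on older kernels the engine
 * keeps using the vectored READV/WRITEV variants.
 */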
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
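
/*
 * The EINVAL ladder above degrades gracefully on older kernels: setup
 * flags the running kernel does not understand are stripped one group at
 * a time (DEFER_TASKRUN/SINGLE_ISSUER first, then COOP_TASKRUN, then
 * CQSIZE) and the setup syscall is retried.
 */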
static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}
	}
	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;

		ld->fds[f->fileno] = f->fd;
		f->engine_pos = f->fileno;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int ret2;

			ret2 = generic_close_file(td, f);
			if (ret2)
				ret = ret2;
		} else
			f->fd = -1;
	}

	return ret;
}
static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static int fio_ioring_cmd_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	return 0;
}
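
/*
 * Worked example of the rounding above: with iodepth=100 the rings are
 * sized to 128 entries (roundup_pow2), while ld->iodepth keeps the
 * original 100 so that fio_ioring_queue() still returns FIO_Q_BUSY at
 * the user-requested depth.
 */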
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		unsigned long long nlba = 0;
		int ret;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		if (data == NULL) {
			ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
			if (ret)
				return ret;

			data = calloc(1, sizeof(struct nvme_data));
			data->nsid = nsid;
			data->lba_shift = ilog2(lba_size);

			FILE_SET_ENG_DATA(f, data);
		}
	}
	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);
		free(data);
	}
	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
{
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		unsigned long long nlba = 0;
		int ret;

		ret = fio_nvme_get_info(f, &nsid, &lba_size, &nlba);
		if (ret)
			return ret;

		data = calloc(1, sizeof(struct nvme_data));
		data->nsid = nsid;
		data->lba_shift = ilog2(lba_size);

		f->real_file_size = lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);
		return 0;
	}
	return generic_get_file_size(td, f);
}
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					  struct fio_file *f,
					  enum zbd_zoned_model *model)
{
	return fio_nvme_get_zoned_model(td, f, model);
}
static int fio_ioring_cmd_report_zones(struct thread_data *td,
					struct fio_file *f, uint64_t offset,
					struct zbd_zone *zbdz,
					unsigned int nr_zones)
{
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}
static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
{
	return fio_nvme_reset_wp(td, f, offset, length);
}
static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					     struct fio_file *f,
					     unsigned int *max_open_zones)
{
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
static struct ioengine_ops ioengine_uring = {
	.name			= "io_uring",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_open_file,
	.close_file		= fio_ioring_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};
static struct ioengine_ops ioengine_uring_cmd = {
	.name			= "io_uring_cmd",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_MEMALIGN | FIO_RAWIO |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_cmd_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_cmd_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_cmd_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_cmd_open_file,
	.close_file		= fio_ioring_cmd_close_file,
	.get_file_size		= fio_ioring_cmd_get_file_size,
	.get_zoned_model	= fio_ioring_cmd_get_zoned_model,
	.report_zones		= fio_ioring_cmd_report_zones,
	.reset_wp		= fio_ioring_cmd_reset_wp,
	.get_max_open_zones	= fio_ioring_cmd_get_max_open_zones,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};
static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);
}
static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);
}
#endif /* ARCH_HAVE_IOURING */