/*
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"
#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
#include "zbd.h"
#include "nvme.h"

#include <sys/stat.h>

enum uring_cmd_type {
	FIO_URING_CMD_NVME = 1,
};
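/*
 * Userspace views of the SQ and CQ rings. The pointers reference fields
 * inside the kernel-shared mmap()'ed regions set up in fio_ioring_mmap(),
 * so loads and stores through them are visible to the kernel.
 */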
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};

struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};

struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	int prepped;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;
};
struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
	enum uring_cmd_type cmd_type;
};
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};
static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}
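/*
 * Option table: maps job-file options onto struct ioring_options fields
 * via offsetof(). The cmdprio_* options are only available on platforms
 * that support I/O priority classes (FIO_HAVE_IOPRIO_CLASS).
 */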
static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#ifdef FIO_HAVE_IOPRIO_CLASS
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.percentage[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.percentage[DDIR_WRITE]),
		.minval	= 0,
		.maxval	= 100,
		.help	= "Send high priority I/O this percentage of the time",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.class[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.class[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority class",
		.minval	= IOPRIO_MIN_PRIO_CLASS + 1,
		.maxval	= IOPRIO_MAX_PRIO_CLASS,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio",
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.level[DDIR_READ]),
		.off2	= offsetof(struct ioring_options,
				   cmdprio_options.level[DDIR_WRITE]),
		.help	= "Set asynchronous IO priority level",
		.minval	= IOPRIO_MIN_PRIO,
		.maxval	= IOPRIO_MAX_PRIO,
		.interval = 1,
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_STR_STORE,
		.off1	= offsetof(struct ioring_options,
				   cmdprio_options.bssplit_str),
		.help	= "Set priority percentages for different block sizes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
#else
	{
		.name	= "cmdprio_percentage",
		.lname	= "high priority percentage",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_class",
		.lname	= "Asynchronous I/O priority class",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio",
		.lname	= "Asynchronous I/O priority level",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
	{
		.name	= "cmdprio_bssplit",
		.lname	= "Priority percentage block size split",
		.type	= FIO_OPT_UNSUPPORTED,
		.help	= "Your platform does not support I/O priority classes",
	},
#endif
	{
		.name	= "fixedbufs",
		.lname	= "Fixed (pre-mapped) IO buffers",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, fixedbufs),
		.help	= "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "registerfiles",
		.lname	= "Register file set",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, registerfiles),
		.help	= "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll",
		.lname	= "Kernel SQ thread polling",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, sqpoll_thread),
		.help	= "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll_cpu",
		.lname	= "SQ Thread Poll CPU",
		.type	= FIO_OPT_INT,
		.cb	= fio_ioring_sqpoll_cb,
		.help	= "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nonvectored",
		.lname	= "Non-vectored",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, nonvectored),
		.def	= "-1",
		.help	= "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "uncached",
		.lname	= "Uncached",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nowait",
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, nowait),
		.help	= "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "force_async",
		.lname	= "Force async",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, force_async),
		.help	= "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmd_type",
		.lname	= "Uring cmd type",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct ioring_options, cmd_type),
		.help	= "Specify uring-cmd type",
		.def	= "nvme",
		.posval = {
			  { .ival = "nvme",
			    .oval = FIO_URING_CMD_NVME,
			    .help = "Issue nvme-uring-cmd",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= NULL,
	},
};
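/*
 * Example job-file fragment exercising some of these options (an
 * illustrative sketch, not taken from this file):
 *
 *	[uring-randread]
 *	ioengine=io_uring
 *	rw=randread
 *	iodepth=32
 *	fixedbufs=1
 *	registerfiles=1
 *	sqthread_poll=1
 */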
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}
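/*
 * Prepare the SQE slot for an io_u: picks the opcode from ddir_to_op /
 * fixed_ddir_to_op and fills in buffer, offset and priority. The entry
 * only becomes visible to the kernel once fio_ioring_queue() advances
 * the SQ ring tail.
 */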
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		sqe->rw_flags = 0;
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (option prio/prioclass)
		 * to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->fsync_flags = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}
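/*
 * SQE prep for the io_uring_cmd (NVMe passthrough) engine. Commands are
 * issued as IORING_OP_URING_CMD with 128-byte SQEs, which is why the SQE
 * array is indexed at io_u->index << 1 here.
 */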
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	if (io_u->ddir == DDIR_TRIM)
		return 0;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}
	sqe->rw_flags = 0;
	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}
	if (o->fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;
	}

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index]);
}
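/*
 * Translate a reaped CQE back to its io_u: a negative res is an error,
 * a short positive res becomes a residual byte count.
 */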
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	} else
		io_u->error = 0;

	return io_u;
}
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != 0)
		io_u->error = -cqe->res;
	else
		io_u->error = 0;

	return io_u;
}
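/*
 * Reap completed CQEs without a syscall: consume everything the kernel
 * has published between our head and its tail, then store the new head
 * back with release semantics.
 */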
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}
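/*
 * Event wait loop. With a polled SQ thread the kernel posts completions
 * on its own; otherwise we block in io_uring_enter(GETEVENTS) until at
 * least actual_min completions have arrived.
 */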
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				r = -errno;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
}
static int fio_ioring_cmd_io_u_trim(struct thread_data *td,
				    struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	if (td->o.zone_mode == ZONE_MODE_ZBD) {
		ret = zbd_do_io_u_trim(td, io_u);
		if (ret == io_u_completed)
			return io_u->xfer_buflen;
		if (ret)
			return ret;
	}

	return fio_nvme_trim(td, f, io_u->offset, io_u->xfer_buflen);
}
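/*
 * Queue an io_u. Trims complete synchronously here (the engine sets
 * FIO_ASYNCIO_SYNC_TRIM); reads, writes and syncs only claim an SQ ring
 * slot and are submitted later in fio_ioring_commit().
 */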
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		if (!strcmp(td->io_ops->name, "io_uring_cmd"))
			fio_ioring_cmd_io_u_trim(td, io_u);
		else
			do_io_u_trim(td, io_u);

		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}
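/*
 * Record issue times for a just-submitted range of SQ entries. The engine
 * advertises FIO_ASYNCIO_SETS_ISSUE_TIME, so this takes the place of the
 * generic issue-time bookkeeping.
 */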
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
}
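/*
 * Submit everything queued in the SQ ring. With sqthread_poll the kernel
 * consumes entries by itself and only needs an IORING_ENTER_SQ_WAKEUP
 * kick if it has gone idle; otherwise loop on io_uring_enter() until all
 * queued entries are accepted.
 */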
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * Kernel side does submission. just need to check if the ring is
	 * flagged as needing a kick, if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.head;
		unsigned flags;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);

		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			ret = -errno;
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}
static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
	close(ld->ring_fd);
}

static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
		free(ld->iovecs);
		free(ld->fds);
		free(ld);
	}
}
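/*
 * mmap() the three ring regions (SQ ring, SQE array, CQ ring) and wire
 * up the struct io_sq_ring/io_cq_ring pointers from the offsets the
 * kernel returned in io_uring_params.
 */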
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
					2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
					p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
	return 0;
}
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, and any syscall if
		 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
		 * separately.
		 */
		td->o.disable_slat = 1;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
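/*
 * Ring setup for the passthrough engine: same flag-fallback scheme as
 * fio_ioring_queue_init() above, plus IORING_SETUP_SQE128/CQE32, which
 * NVMe uring commands require.
 */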
static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, and any syscall if
		 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
		 * separately.
		 */
		td->o.disable_slat = 1;
	}
	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size, we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
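/*
 * Open all files up front and register them with the ring
 * (IORING_REGISTER_FILES), so SQEs can reference them as fixed-file
 * indices via f->engine_pos.
 */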
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[i] = f->fd;
		f->engine_pos = i;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int fio_unused ret2;
			ret2 = generic_close_file(td, f);
		} else
			f->fd = -1;
	}

	return ret;
}
static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static int fio_ioring_cmd_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
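/*
 * Common per-thread init for both engines: round the ring size up to a
 * power of two, allocate the io_u index and iovec tables, and initialize
 * cmdprio state.
 */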
static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	return 0;
}
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
	return 0;
}
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		__u32 ms = 0;
		__u64 nlba = 0;
		int ret;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		if (data == NULL) {
			ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
			if (ret)
				return ret;

			data = calloc(1, sizeof(struct nvme_data));
			data->nsid = nsid;
			if (ms)
				data->lba_ext = lba_size + ms;
			else
				data->lba_shift = ilog2(lba_size);

			FILE_SET_ENG_DATA(f, data);
		}

		assert(data->lba_shift < 32);
		lba_size = data->lba_ext ? data->lba_ext : (1U << data->lba_shift);

		for_each_rw_ddir(ddir) {
			if (td->o.min_bs[ddir] % lba_size ||
			    td->o.max_bs[ddir] % lba_size) {
				if (data->lba_ext)
					log_err("block size must be a multiple of "
						"(LBA data size + Metadata size)\n");
				else
					log_err("block size must be a multiple of LBA data size\n");
				return 1;
			}
		}
	}
	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);
		free(data);
	}
	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
{
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;
		__u32 ms = 0;
		__u64 nlba = 0;
		int ret;

		ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);
		if (ret)
			return ret;

		data = calloc(1, sizeof(struct nvme_data));
		data->nsid = nsid;
		if (ms)
			data->lba_ext = lba_size + ms;
		else
			data->lba_shift = ilog2(lba_size);

		f->real_file_size = lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);
		return 0;
	}
	return generic_get_file_size(td, f);
}
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					  struct fio_file *f,
					  enum zbd_zoned_model *model)
{
	return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
				       struct fio_file *f, uint64_t offset,
				       struct zbd_zone *zbdz,
				       unsigned int nr_zones)
{
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
{
	return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					     struct fio_file *f,
					     unsigned int *max_open_zones)
{
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
				     struct fio_ruhs_info *fruhs_info)
{
	struct nvme_fdp_ruh_status *ruhs;
	int bytes, ret, i;

	bytes = sizeof(*ruhs) + 128 * sizeof(struct nvme_fdp_ruh_status_desc);
	ruhs = scalloc(1, bytes);
	if (!ruhs)
		return -ENOMEM;

	ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
	if (ret)
		goto free;

	fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
	for (i = 0; i < fruhs_info->nr_ruhs; i++)
		fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
free:
	sfree(ruhs);
	return ret;
}
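/*
 * The two engine definitions: "io_uring" for regular files and block
 * devices, "io_uring_cmd" for NVMe passthrough via character devices.
 */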
static struct ioengine_ops ioengine_uring = {
	.name			= "io_uring",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_open_file,
	.close_file		= fio_ioring_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};
static struct ioengine_ops ioengine_uring_cmd = {
	.name			= "io_uring_cmd",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_MEMALIGN | FIO_RAWIO |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_cmd_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_cmd_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_cmd_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_cmd_open_file,
	.close_file		= fio_ioring_cmd_close_file,
	.get_file_size		= fio_ioring_cmd_get_file_size,
	.get_zoned_model	= fio_ioring_cmd_get_zoned_model,
	.report_zones		= fio_ioring_cmd_report_zones,
	.reset_wp		= fio_ioring_cmd_reset_wp,
	.get_max_open_zones	= fio_ioring_cmd_get_max_open_zones,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
	.fdp_fetch_ruhs		= fio_ioring_cmd_fetch_ruhs,
};
static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);
}
#endif