/*
 * io_uring engine
 *
 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
 *
 */
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/resource.h>

#include "../fio.h"
#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"
#include "../verify.h"
#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
#include "cmdprio.h"
#include "zbd.h"
#include "nvme.h"
enum uring_cmd_type {
	FIO_URING_CMD_NVME = 1,
};
struct io_sq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	unsigned *flags;
	unsigned *array;
};
struct io_cq_ring {
	unsigned *head;
	unsigned *tail;
	unsigned *ring_mask;
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};

struct ioring_mmap {
	void *ptr;
	size_t len;
};
struct ioring_data {
	int ring_fd;

	struct io_u **io_u_index;
	char *md_buf;

	int *fds;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct iovec *iovecs;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	int queued;
	int cq_ring_off;
	unsigned iodepth;
	int prepped;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;

	struct nvme_dsm_range *dsm;
};
struct ioring_options {
	struct thread_data *td;
	unsigned int hipri;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int nowait;
	unsigned int force_async;
	unsigned int md_per_io_size;
	unsigned int pi_act;
	unsigned int apptag;
	unsigned int apptag_mask;
	unsigned int prchk;
	char *pi_chk;
	enum uring_cmd_type cmd_type;
};
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
	o->sqpoll_set = 1;
	return 0;
}
static struct fio_option options[] = {
	{
		.name	= "hipri",
		.lname	= "High Priority",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, hipri),
		.help	= "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "fixedbufs",
		.lname	= "Fixed (pre-mapped) IO buffers",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, fixedbufs),
		.help	= "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "registerfiles",
		.lname	= "Register file set",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, registerfiles),
		.help	= "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll",
		.lname	= "Kernel SQ thread polling",
		.type	= FIO_OPT_STR_SET,
		.off1	= offsetof(struct ioring_options, sqpoll_thread),
		.help	= "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "sqthread_poll_cpu",
		.lname	= "SQ Thread Poll CPU",
		.type	= FIO_OPT_INT,
		.cb	= fio_ioring_sqpoll_cb,
		.help	= "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nonvectored",
		.lname	= "Non-vectored",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, nonvectored),
		.def	= "-1",
		.help	= "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "uncached",
		.lname	= "Uncached",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, uncached),
		.help	= "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "nowait",
		.lname	= "RWF_NOWAIT",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, nowait),
		.help	= "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "force_async",
		.lname	= "Force async",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, force_async),
		.help	= "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "cmd_type",
		.lname	= "Uring cmd type",
		.type	= FIO_OPT_STR,
		.off1	= offsetof(struct ioring_options, cmd_type),
		.help	= "Specify uring-cmd type",
		.def	= "nvme",
		.posval = {
			  { .ival = "nvme",
			    .oval = FIO_URING_CMD_NVME,
			    .help = "Issue nvme-uring-cmd",
			  },
		},
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),
	{
		.name	= "md_per_io_size",
		.lname	= "Separate Metadata Buffer Size per I/O",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, md_per_io_size),
		.def	= "0",
		.help	= "Size of separate metadata buffer per I/O (Default: 0)",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "pi_act",
		.lname	= "Protection Information Action",
		.type	= FIO_OPT_BOOL,
		.off1	= offsetof(struct ioring_options, pi_act),
		.def	= "1",
		.help	= "Protection Information Action bit (pi_act=1 or pi_act=0)",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "pi_chk",
		.lname	= "Protection Information Check",
		.type	= FIO_OPT_STR_STORE,
		.off1	= offsetof(struct ioring_options, pi_chk),
		.def	= NULL,
		.help	= "Control of Protection Information Checking (pi_chk=GUARD,REFTAG,APPTAG)",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "apptag",
		.lname	= "Application Tag used in Protection Information",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, apptag),
		.def	= "0x1234",
		.help	= "Application Tag used in Protection Information field (Default: 0x1234)",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= "apptag_mask",
		.lname	= "Application Tag Mask",
		.type	= FIO_OPT_INT,
		.off1	= offsetof(struct ioring_options, apptag_mask),
		.def	= "0xffff",
		.help	= "Application Tag Mask used with Application Tag (Default: 0xffff)",
		.category = FIO_OPT_C_ENGINE,
		.group	= FIO_OPT_G_IOURING,
	},
	{
		.name	= NULL,
	},
};
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
				min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
}
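/*
 * Illustrative only (not part of the engine): the wrapper above serves both
 * roles of io_uring_enter(2). fio_ioring_commit() submits with to_submit > 0
 * and min_complete == 0, while fio_ioring_getevents() passes to_submit == 0
 * and waits for a minimum number of completions via IORING_ENTER_GETEVENTS.
 */
#if 0
	/* submit everything queued so far, without waiting */
	io_uring_enter(ld, ld->queued, 0, IORING_ENTER_GETEVENTS);

	/* submit nothing, wait until at least 'actual_min' CQEs are ready */
	io_uring_enter(ld, 0, actual_min, IORING_ENTER_GETEVENTS);
#endif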
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
		sqe->flags = 0;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on the actual io_u; a requeue
			 * could have adjusted these.
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
				sqe->len = 1;
			}
		}
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (options prio, prioclass,
		 * and priohint) to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		sqe->ioprio = 0;
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			sqe->off = 0;
			sqe->addr = 0;
			sqe->len = 0;
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
	return 0;
}
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)
		return 0;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	} else {
		sqe->fd = f->fd;
	}
	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		ld->prepped = 0;
		sqe->flags |= IOSQE_ASYNC;
	}
	if (o->fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;
	}

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index],
			&ld->dsm[io_u->index]);
}
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	unsigned index;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	}

	return io_u;
}
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;
	struct io_u *io_u;
	struct nvme_data *data;
	unsigned index;
	int ret;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != 0) {
		io_u->error = -cqe->res;
		return io_u;
	} else
		io_u->error = 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		data = FILE_ENG_DATA(io_u->file);
		if (data->pi_type && (io_u->ddir == DDIR_READ) && !o->pi_act) {
			ret = fio_nvme_pi_verify(data, io_u);
			if (ret)
				io_u->error = ret;
		}
	}

	return io_u;
}
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	head = *ring->head;
	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
		reaped++;
		head++;
	} while (reaped + events < max);

	if (reaped)
		atomic_store_release(ring->head, head);

	return reaped;
}
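/*
 * A minimal sketch (illustrative only) of the consumer-side ordering contract
 * the reap loop above relies on, using a hypothetical cq_consume() helper:
 * the kernel publishes new CQEs with a release store to *tail, so the
 * consumer pairs that with an acquire load before reading entries, and
 * publishes the new *head with a release store so the kernel can safely
 * reuse the consumed slots.
 */
#if 0
static unsigned cq_consume(struct io_cq_ring *ring, unsigned mask)
{
	unsigned head = *ring->head;
	/* acquire pairs with the kernel's release store on the tail */
	unsigned tail = atomic_load_acquire(ring->tail);
	unsigned nr = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &ring->cqes[head & mask];

		(void) cqe;	/* process the completion here */
		head++;
		nr++;
	}
	/* release lets the kernel safely reuse the consumed slots */
	atomic_store_release(ring->head, head);
	return nr;
}
#endif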
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned events = 0;
	int r;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);
		if (r) {
			events += r;
			max -= r;
			if (actual_min != 0)
				actual_min -= r;
			continue;
		}

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
			if (r < 0) {
				if (errno == EAGAIN || errno == EINTR)
					continue;
				r = -errno;
				td_verror(td, errno, "io_uring_enter");
				break;
			}
		}
	} while (events < min);

	return r < 0 ? r : events;
}
static inline void fio_ioring_cmd_nvme_pi(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;
	struct nvme_cmd_ext_io_opts ext_opts = {0};
	struct nvme_data *data = FILE_ENG_DATA(io_u->file);

	if (io_u->ddir == DDIR_TRIM)
		return;

	sqe = &ld->sqes[(io_u->index) << 1];
	cmd = (struct nvme_uring_cmd *)sqe->cmd;

	if (data->pi_type) {
		if (o->pi_act)
			ext_opts.io_flags |= NVME_IO_PRINFO_PRACT;
		ext_opts.io_flags |= o->prchk;
		ext_opts.apptag = o->apptag;
		ext_opts.apptag_mask = o->apptag_mask;
	}

	fio_nvme_pi_fill(cmd, io_u, &ext_opts);
}
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
}
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);

		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	if (next_tail == atomic_load_relaxed(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    o->cmd_type == FIO_URING_CMD_NVME)
		fio_ioring_cmd_nvme_pi(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);

	ld->queued++;
	return FIO_Q_QUEUED;
}
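/*
 * Producer-side sketch (illustrative only) of the publish protocol used
 * above: the SQ array slot must be filled before the tail is advanced, and
 * the release store on the tail is what makes the entry visible to the
 * kernel (or the SQPOLL thread) in one shot. The head is only ever advanced
 * by the kernel, so a relaxed load suffices when checking for a full ring.
 */
#if 0
	ring->array[tail & ld->sq_ring_mask] = io_u->index;	/* fill slot first */
	atomic_store_release(ring->tail, next_tail);		/* then publish */
#endif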
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
}
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * The kernel side does submission; we just need to check if the ring
	 * is flagged as needing a kick and, if so, call io_uring_enter().
	 * This only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.tail - ld->queued;
		unsigned flags;

		flags = atomic_load_relaxed(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);

		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			ret = -errno;
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}
static void fio_ioring_unmap(struct ioring_data *ld)
{
	int i;

	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);

	close(ld->ring_fd);
}
static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (ld) {
		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
		free(ld->md_buf);
		free(ld->iovecs);
		free(ld->fds);
		free(ld->dsm);
		free(ld);
	}
}
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;
	void *ptr;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_POPULATE, ld->ring_fd,
				IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
					2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
					p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;

	return 0;
}
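/*
 * Note: kernels that report IORING_FEAT_SINGLE_MMAP in p->features allow the
 * SQ and CQ rings to share one mapping at IORING_OFF_SQ_RING, sized to the
 * larger of the two regions; the three separate mappings above work on all
 * kernels. A minimal sketch of the feature check (not used by this engine):
 */
#if 0
	if (p->features & IORING_FEAT_SINGLE_MMAP) {
		size_t len = ld->mmap[0].len;

		if (ld->mmap[2].len > len)
			len = ld->mmap[2].len;
		ptr = mmap(0, len, PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			   IORING_OFF_SQ_RING);
		/* both SQ and CQ ring pointers derive from this one mapping */
	}
#endif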
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;
	int ret;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	if (!p)
		return;

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);
	if (ret < 0)
		goto out;

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
out:
	free(p);
}
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall if
		 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that
		 * time separately.
		 */
		td->o.disable_slat = 1;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size; we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Set up COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall if
		 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that
		 * time separately.
		 */
		td->o.disable_slat = 1;
	}
	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size; we don't need more entries
	 * than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Set up COOP_TASKRUN as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

retry:
	ret = syscall(__NR_io_uring_setup, depth, &p);
	if (ret < 0) {
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
			goto retry;
		}
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;
			goto retry;
		}
		return ret;
	}

	ld->ring_fd = ret;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);
		if (ret < 0)
			return ret;
	}

	return fio_ioring_mmap(ld, &p);
}
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct fio_file *f;
	unsigned int i;
	int ret;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
		if (ret)
			goto err;
		ld->fds[f->engine_pos] = f->fd;
		f->engine_data = f->engine_pos;
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	if (ret) {
err:
		free(ld->fds);
		ld->fds = NULL;
	}

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		if (ret) {
			int fio_unused ret2;

			ret2 = generic_close_file(td, f);
		} else
			fio_file_clear_open(f);
	}

	return ret;
}
static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static int fio_ioring_cmd_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_u *io_u;
	int err, i;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}

	return 0;
}
static void parse_prchk_flags(struct ioring_options *o)
{
	if (!o->pi_chk)
		return;

	if (strstr(o->pi_chk, "GUARD") != NULL)
		o->prchk = NVME_IO_PRINFO_PRCHK_GUARD;
	if (strstr(o->pi_chk, "REFTAG") != NULL)
		o->prchk |= NVME_IO_PRINFO_PRCHK_REF;
	if (strstr(o->pi_chk, "APPTAG") != NULL)
		o->prchk |= NVME_IO_PRINFO_PRCHK_APP;
}
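/*
 * Illustrative only (assumes assert() is available): how the pi_chk option
 * string maps onto the prchk bitmask. For example, pi_chk=GUARD,APPTAG sets
 * the GUARD and APPTAG check bits but leaves REFTAG checking disabled:
 */
#if 0
	struct ioring_options o = { .pi_chk = "GUARD,APPTAG" };

	parse_prchk_flags(&o);
	assert(o.prchk == (NVME_IO_PRINFO_PRCHK_GUARD | NVME_IO_PRINFO_PRCHK_APP));
#endif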
static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	unsigned long long md_size;
	int ret;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	/* io_u index */
	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));

	/*
	 * Metadata buffer for nvme commands.
	 * We are only supporting iomem=malloc / mem=malloc as of now.
	 */
	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    (o->cmd_type == FIO_URING_CMD_NVME) && o->md_per_io_size) {
		md_size = (unsigned long long) o->md_per_io_size
				* (unsigned long long) td->o.iodepth;
		md_size += page_mask + td->o.mem_align;
		if (td->o.mem_align && td->o.mem_align > page_size)
			md_size += td->o.mem_align - page_size;
		if (td->o.mem_type == MEM_MALLOC) {
			ld->md_buf = malloc(md_size);
			if (!ld->md_buf) {
				free(ld);
				return 1;
			}
		} else {
			log_err("fio: Only iomem=malloc or mem=malloc is supported\n");
			free(ld);
			return 1;
		}
	}
	parse_prchk_flags(o);

	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	/*
	 * For io_uring_cmd, trims are async operations unless we are operating
	 * in zbd mode, where trim means zone reset.
	 */
	if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
	    td->o.zone_mode == ZONE_MODE_ZBD)
		td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;
	else
		ld->dsm = calloc(td->o.iodepth, sizeof(*ld->dsm));

	return 0;
}
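/*
 * Worked example (illustrative only, assuming roundup_pow2() from
 * ../lib/roundup.h rounds up to the nearest power of two and leaves exact
 * powers unchanged): with iodepth=53, ld->iodepth stays 53 for accounting
 * while the rings are set up with 64 entries, since io_uring ring sizes
 * must be powers of two.
 */
#if 0
	assert(roundup_pow2(53) == 64);
	assert(roundup_pow2(64) == 64);
#endif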
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct nvme_pi_data *pi_data;
	char *p;

	ld->io_u_index[io_u->index] = io_u;

	if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
		p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
		p += o->md_per_io_size * io_u->index;
		io_u->mmap_data = p;

		if (!o->pi_act) {
			pi_data = calloc(1, sizeof(*pi_data));
			pi_data->io_flags |= o->prchk;
			pi_data->apptag_mask = o->apptag_mask;
			pi_data->apptag = o->apptag;
			io_u->engine_data = pi_data;
		}
	}

	return 0;
}
static void fio_ioring_io_u_free(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_options *o = td->eo;
	struct nvme_pi_data *pi;

	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    (o->cmd_type == FIO_URING_CMD_NVME)) {
		pi = io_u->engine_data;
		free(pi);
		io_u->engine_data = NULL;
	}
}
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int lba_size = 0;
		__u64 nlba = 0;
		int ret;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		if (data == NULL) {
			data = calloc(1, sizeof(struct nvme_data));
			ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
			if (ret) {
				free(data);
				return ret;
			}

			FILE_SET_ENG_DATA(f, data);
		}

		lba_size = data->lba_ext ? data->lba_ext : data->lba_size;

		for_each_rw_ddir(ddir) {
			if (td->o.min_bs[ddir] % lba_size || td->o.max_bs[ddir] % lba_size) {
				if (data->lba_ext) {
					log_err("%s: block size must be a multiple of %u "
						"(LBA data size + Metadata size)\n", f->file_name, lba_size);
					if (td->o.min_bs[ddir] == td->o.max_bs[ddir] &&
					    !(td->o.min_bs[ddir] % data->lba_size)) {
						/* fixed block size is actually a multiple of LBA data size */
						unsigned long long suggestion = lba_size *
							(td->o.min_bs[ddir] / data->lba_size);
						log_err("Did you mean to use a block size of %llu?\n",
							suggestion);
					}
				} else {
					log_err("%s: block size must be a multiple of LBA data size\n",
						f->file_name);
				}
				td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
				return 1;
			}
			if (data->ms && !data->lba_ext && ddir != DDIR_TRIM &&
			    (o->md_per_io_size < ((td->o.max_bs[ddir] / data->lba_size) *
						  data->ms))) {
				log_err("%s: md_per_io_size should be at least %llu bytes\n",
					f->file_name,
					((td->o.max_bs[ddir] / data->lba_size) * data->ms));
				td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
				return 1;
			}
		}

		/*
		 * For extended logical block sizes we cannot use verify when
		 * end to end data protection checks are enabled, as the PI
		 * section of data buffer conflicts with verify.
		 */
		if (data->ms && data->pi_type && data->lba_ext &&
		    td->o.verify != VERIFY_NONE) {
			log_err("%s: for extended LBA, verify cannot be used when E2E data protection is enabled\n",
				f->file_name);
			td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
			return 1;
		}
	}
	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
	return 0;
}
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);
		free(data);
	}
	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

	f->fd = -1;
	return 0;
}
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
{
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		__u64 nlba = 0;
		int ret;

		data = calloc(1, sizeof(struct nvme_data));
		ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
		if (ret) {
			free(data);
			return ret;
		}

		f->real_file_size = data->lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);
		return 0;
	}
	return generic_get_file_size(td, f);
}
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					  struct fio_file *f,
					  enum zbd_zoned_model *model)
{
	return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
				       struct fio_file *f, uint64_t offset,
				       struct zbd_zone *zbdz,
				       unsigned int nr_zones)
{
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
{
	return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					     struct fio_file *f,
					     unsigned int *max_open_zones)
{
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
				     struct fio_ruhs_info *fruhs_info)
{
	struct nvme_fdp_ruh_status *ruhs;
	int bytes, ret, i;

	bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
	ruhs = scalloc(1, bytes);
	if (!ruhs)
		return -ENOMEM;

	ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);
	if (ret)
		goto free;

	fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
	for (i = 0; i < fruhs_info->nr_ruhs; i++)
		fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
free:
	sfree(ruhs);
	return ret;
}
static struct ioengine_ops ioengine_uring = {
	.name			= "io_uring",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.prep			= fio_ioring_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_open_file,
	.close_file		= fio_ioring_close_file,
	.get_file_size		= generic_get_file_size,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
};
static struct ioengine_ops ioengine_uring_cmd = {
	.name			= "io_uring_cmd",
	.version		= FIO_IOOPS_VERSION,
	.flags			= FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
				  FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init			= fio_ioring_init,
	.post_init		= fio_ioring_cmd_post_init,
	.io_u_init		= fio_ioring_io_u_init,
	.io_u_free		= fio_ioring_io_u_free,
	.prep			= fio_ioring_cmd_prep,
	.queue			= fio_ioring_queue,
	.commit			= fio_ioring_commit,
	.getevents		= fio_ioring_getevents,
	.event			= fio_ioring_cmd_event,
	.cleanup		= fio_ioring_cleanup,
	.open_file		= fio_ioring_cmd_open_file,
	.close_file		= fio_ioring_cmd_close_file,
	.get_file_size		= fio_ioring_cmd_get_file_size,
	.get_zoned_model	= fio_ioring_cmd_get_zoned_model,
	.report_zones		= fio_ioring_cmd_report_zones,
	.reset_wp		= fio_ioring_cmd_reset_wp,
	.get_max_open_zones	= fio_ioring_cmd_get_max_open_zones,
	.options		= options,
	.option_struct_size	= sizeof(struct ioring_options),
	.fdp_fetch_ruhs		= fio_ioring_cmd_fetch_ruhs,
};
static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);
}
#endif