 * IO engine using the native Linux io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
#include <sys/resource.h>

#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
	FIO_URING_CMD_NVME = 1,

	unsigned *ring_entries;

	unsigned *ring_entries;
	struct io_uring_cqe *cqes;

	struct io_u **io_u_index;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;

	struct nvme_dsm_range *dsm;
struct ioring_options {
	struct thread_data *td;

	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int force_async;
	unsigned int md_per_io_size;
	unsigned int apptag_mask;
	enum uring_cmd_type cmd_type;
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
static struct fio_option options[] = {
		.lname = "High Priority",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, hipri),
		.help = "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Fixed (pre-mapped) IO buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, fixedbufs),
		.help = "Pre map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "registerfiles",
		.lname = "Register file set",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, registerfiles),
		.help = "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "sqthread_poll",
		.lname = "Kernel SQ thread polling",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, sqpoll_thread),
		.help = "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "sqthread_poll_cpu",
		.lname = "SQ Thread Poll CPU",
		.cb = fio_ioring_sqpoll_cb,
		.help = "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "nonvectored",
		.lname = "Non-vectored",
		.off1 = offsetof(struct ioring_options, nonvectored),
		.help = "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.off1 = offsetof(struct ioring_options, uncached),
		.help = "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "RWF_NOWAIT",
		.type = FIO_OPT_BOOL,
		.off1 = offsetof(struct ioring_options, nowait),
		.help = "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "force_async",
		.lname = "Force async",
		.off1 = offsetof(struct ioring_options, force_async),
		.help = "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Uring cmd type",
		.off1 = offsetof(struct ioring_options, cmd_type),
		.help = "Specify uring-cmd type",
			  .oval = FIO_URING_CMD_NVME,
			  .help = "Issue nvme-uring-cmd",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

	CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),

		.name = "md_per_io_size",
		.lname = "Separate Metadata Buffer Size per I/O",
		.off1 = offsetof(struct ioring_options, md_per_io_size),
		.help = "Size of separate metadata buffer per I/O (Default: 0)",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Protection Information Action",
		.type = FIO_OPT_BOOL,
		.off1 = offsetof(struct ioring_options, pi_act),
		.help = "Protection Information Action bit (pi_act=1 or pi_act=0)",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Protection Information Check",
		.type = FIO_OPT_STR_STORE,
		.off1 = offsetof(struct ioring_options, pi_chk),
		.help = "Control of Protection Information Checking (pi_chk=GUARD,REFTAG,APPTAG)",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Application Tag used in Protection Information",
		.off1 = offsetof(struct ioring_options, apptag),
		.help = "Application Tag used in Protection Information field (Default: 0x1234)",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "apptag_mask",
		.lname = "Application Tag Mask",
		.off1 = offsetof(struct ioring_options, apptag_mask),
		.help = "Application Tag Mask used with Application Tag (Default: 0xffff)",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
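
/*
 * Note: this engine does not go through liburing; io_uring_enter(2) is
 * issued directly below, using fio's __do_syscall6() wrapper when the
 * architecture provides FIO_ARCH_HAS_SYSCALL, and falling back to the
 * libc syscall() helper otherwise.
 */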
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
			     min_complete, flags, NULL, 0);
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
		       min_complete, flags, NULL, 0);
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		sqe->opcode = fixed_ddir_to_op[io_u->ddir];
		sqe->addr = (unsigned long) io_u->xfer_buf;
		sqe->len = io_u->xfer_buflen;
		sqe->buf_index = io_u->index;
		 * Update based on actual io_u; a requeue could have adjusted these
		iov->iov_base = io_u->xfer_buf;
		iov->iov_len = io_u->xfer_buflen;

		sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
		if (o->nonvectored) {
			sqe->addr = (unsigned long) iov->iov_base;
			sqe->len = iov->iov_len;
			sqe->addr = (unsigned long) iov;

		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
			sqe->rw_flags |= RWF_NOWAIT;

		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (options prio, prioclass,
		 * and priohint) to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		if (io_u->ddir == DDIR_DATASYNC)
			sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
		sqe->opcode = IORING_OP_FSYNC;

	if (o->force_async && ++ld->prepped == o->force_async) {
		sqe->flags |= IOSQE_ASYNC;

	sqe->user_data = (unsigned long) io_u;
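
/*
 * For passthrough commands the ring is created with IORING_SETUP_SQE128
 * (see the cmd queue init further down), so each command occupies two
 * regular SQE slots; hence the "(io_u->index) << 1" indexing into ld->sqes.
 */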
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;

	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
		sqe->cmd_op = NVME_URING_CMD_IO;
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		sqe->flags |= IOSQE_ASYNC;

		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index],
			&ld->dsm[io_u->index]);
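
/*
 * Completion lookup: events are indexed relative to the CQ head that was
 * sampled when reaping started (ld->cq_ring_off), masked by the ring size.
 */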
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
			io_u->resid = io_u->xfer_buflen - cqe->res;

static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;
	struct nvme_data *data;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

		io_u->error = -cqe->res;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		data = FILE_ENG_DATA(io_u->file);
		if (data->pi_type && (io_u->ddir == DDIR_READ) && !o->pi_act) {
			ret = fio_nvme_pi_verify(data, io_u);
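
/*
 * Reap completions directly from the shared CQ ring: consume entries until
 * head catches up with the (acquire-loaded) tail, then publish the new head
 * with a release store so the kernel can reuse those slots.
 */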
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

		if (head == atomic_load_acquire(ring->tail))
	} while (reaped + events < max);

	atomic_store_release(ring->head, head);

static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;

	ld->cq_ring_off = *ring->head;
		r = fio_ioring_cqring_reap(td, events, max);

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
					   IORING_ENTER_GETEVENTS);
				if (errno == EAGAIN || errno == EINTR)
				td_verror(td, errno, "io_uring_enter");
	} while (events < min);

	return r < 0 ? r : events;
static inline void fio_ioring_cmd_nvme_pi(struct thread_data *td,
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;
	struct nvme_cmd_ext_io_opts ext_opts = {0};
	struct nvme_data *data = FILE_ENG_DATA(io_u->file);

	if (io_u->ddir == DDIR_TRIM)

	sqe = &ld->sqes[(io_u->index) << 1];
	cmd = (struct nvme_uring_cmd *)sqe->cmd;

		ext_opts.io_flags |= NVME_IO_PRINFO_PRACT;
	ext_opts.io_flags |= o->prchk;
	ext_opts.apptag = o->apptag;
	ext_opts.apptag_mask = o->apptag_mask;

	fio_nvme_pi_fill(cmd, io_u, &ext_opts);

static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
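
/*
 * Queue path: the SQE for this io_u was already filled in by ->prep(); here
 * we only place its index into the SQ array slot and publish the new tail
 * with a release store. The actual io_uring_enter() submission happens in
 * ->commit().
 */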
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;

	next_tail = tail + 1;
	if (next_tail == atomic_load_relaxed(ring->head))

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    o->cmd_type == FIO_URING_CMD_NVME)
		fio_ioring_cmd_nvme_pi(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);
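
/*
 * With sqpoll the kernel thread performs the submission, so fio records the
 * issue time itself here for the SQEs that were just made visible in the
 * ring (hence the FIO_ASYNCIO_SETS_ISSUE_TIME engine flag below).
 */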
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
	struct ioring_data *ld = td->io_ops_data;

	if (!fio_fill_issue_time(td))

	fio_gettime(&now, NULL);

		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

	 * only used for iolog
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
static int fio_ioring_commit(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	 * The kernel side handles submission. We just need to check if the
	 * ring is flagged as needing a kick; if so, call io_uring_enter().
	 * This only happens if we've been idle too long.
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.tail - ld->queued;

		flags = atomic_load_relaxed(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
				       IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);

		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);
			io_u_mark_submit(td, ret);
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				/* Shouldn't happen */
			td_verror(td, errno, "io_uring_enter submit");
	} while (ld->queued);
static void fio_ioring_unmap(struct ioring_data *ld)
	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);

static void fio_ioring_cleanup(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;

		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
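
/*
 * Map the three ring regions exported by io_uring_setup(): the SQ ring
 * (head/tail/flags/array), the SQE array itself, and the CQ ring. The SQE
 * and CQE regions are doubled in size when SQE128/CQE32 are in effect.
 */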
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
			2 * p->cq_entries * sizeof(struct io_uring_cqe);
		ld->mmap[2].len = p->cq_off.cqes +
			p->cq_entries * sizeof(struct io_uring_cqe);
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
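
/*
 * Probe the kernel via IORING_REGISTER_PROBE to find out whether the
 * non-vectored IORING_OP_READ/IORING_OP_WRITE opcodes are supported, and
 * pick a default for the "nonvectored" option if the user didn't set it.
 */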
static void fio_ioring_probe(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)

	/* default to off, as that's always safe */

	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
		      IORING_REGISTER_PROBE, p, 256);

	if (IORING_OP_WRITE > p->ops_len)

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
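
/*
 * Ring setup: optional flags (DEFER_TASKRUN/SINGLE_ISSUER, COOP_TASKRUN,
 * CQSIZE) are tried first and stripped again on EINVAL, so setup still
 * succeeds on older kernels that don't know about them.
 */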
static int fio_ioring_queue_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall
		 * issued when IORING_SQ_NEED_WAKEUP is set; there is no need
		 * to log that time separately.
		td->o.disable_slat = 1;

	 * Clamp CQ ring size at our SQ ring size; we don't need more entries
	 * than that.
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	 * Set up COOP_TASKRUN, as we don't need to get IPI interrupted for
	 * completing IO operations.
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
	ret = syscall(__NR_io_uring_setup, depth, &p);
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;

	fio_ioring_probe(td);

		ret = syscall(__NR_io_uring_register, ld->ring_fd,
			      IORING_REGISTER_BUFFERS, ld->iovecs, depth);

	return fio_ioring_mmap(ld, &p);
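
/*
 * Same setup as above, but for uring passthrough: NVMe commands need the big
 * SQE/CQE formats, so IORING_SETUP_SQE128 and IORING_SETUP_CQE32 are added
 * for FIO_URING_CMD_NVME.
 */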
static int fio_ioring_cmd_queue_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall
		 * issued when IORING_SQ_NEED_WAKEUP is set; there is no need
		 * to log that time separately.
		td->o.disable_slat = 1;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;

	 * Clamp CQ ring size at our SQ ring size; we don't need more entries
	 * than that.
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	 * Set up COOP_TASKRUN, as we don't need to get IPI interrupted for
	 * completing IO operations.
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
	ret = syscall(__NR_io_uring_setup, depth, &p);
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;

	fio_ioring_probe(td);

		ret = syscall(__NR_io_uring_register, ld->ring_fd,
			      IORING_REGISTER_BUFFERS, ld->iovecs, depth);

	return fio_ioring_mmap(ld, &p);
static int fio_ioring_register_files(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
		      IORING_REGISTER_FILES, ld->fds, td->o.nr_files);
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	for_each_file(td, f, i) {
			int fio_unused ret2;
			ret2 = generic_close_file(td, f);
static int fio_ioring_post_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);

	err = fio_ioring_queue_init(td);
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		memset(sqe, 0, sizeof(*sqe));

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
			td_verror(td, errno, "ioring_register_files");
static int fio_ioring_cmd_post_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);

	err = fio_ioring_cmd_queue_init(td);
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
			memset(sqe, 0, sizeof(*sqe));

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
			td_verror(td, errno, "ioring_register_files");
static void parse_prchk_flags(struct ioring_options *o)
	if (strstr(o->pi_chk, "GUARD") != NULL)
		o->prchk = NVME_IO_PRINFO_PRCHK_GUARD;
	if (strstr(o->pi_chk, "REFTAG") != NULL)
		o->prchk |= NVME_IO_PRINFO_PRCHK_REF;
	if (strstr(o->pi_chk, "APPTAG") != NULL)
		o->prchk |= NVME_IO_PRINFO_PRCHK_APP;
static int fio_ioring_init(struct thread_data *td)
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	unsigned long long md_size;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	 * Metadata buffer for nvme commands; only iomem=malloc / mem=malloc
	 * is supported for now.
	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    (o->cmd_type == FIO_URING_CMD_NVME) && o->md_per_io_size) {
		md_size = (unsigned long long) o->md_per_io_size
				* (unsigned long long) td->o.iodepth;
		md_size += page_mask + td->o.mem_align;
		if (td->o.mem_align && td->o.mem_align > page_size)
			md_size += td->o.mem_align - page_size;
		if (td->o.mem_type == MEM_MALLOC) {
			ld->md_buf = malloc(md_size);
			log_err("fio: Only iomem=malloc or mem=malloc is supported\n");

	parse_prchk_flags(o);

	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
		td_verror(td, EINVAL, "fio_ioring_init");

	 * For io_uring_cmd, trims are async operations unless we are operating
	 * in zbd mode where trim means zone reset.
	if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
	    td->o.zone_mode == ZONE_MODE_ZBD)
		td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;

	ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
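
/*
 * Per-io_u setup: remember the io_u by index for completion lookup, and for
 * io_uring_cmd carve this io_u's slice out of the separately allocated
 * metadata buffer (ld->md_buf), aligned as requested.
 */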
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct nvme_pi_data *pi_data;

	ld->io_u_index[io_u->index] = io_u;

	if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
		p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
		p += o->md_per_io_size * io_u->index;
		io_u->mmap_data = p;

			pi_data = calloc(1, sizeof(*pi_data));
			pi_data->io_flags |= o->prchk;
			pi_data->apptag_mask = o->apptag_mask;
			pi_data->apptag = o->apptag;
			io_u->engine_data = pi_data;

static void fio_ioring_io_u_free(struct thread_data *td, struct io_u *io_u)
	struct ioring_options *o = td->eo;

	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    (o->cmd_type == FIO_URING_CMD_NVME)) {
		pi = io_u->engine_data;
		io_u->engine_data = NULL;
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];

static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int lba_size = 0;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
			data = calloc(1, sizeof(struct nvme_data));
			ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);
			FILE_SET_ENG_DATA(f, data);

		lba_size = data->lba_ext ? data->lba_ext : data->lba_size;

		for_each_rw_ddir(ddir) {
			if (td->o.min_bs[ddir] % lba_size ||
			    td->o.max_bs[ddir] % lba_size) {
				log_err("%s: block size must be a multiple of (LBA data size + Metadata size)\n",
				log_err("%s: block size must be a multiple of LBA data size\n",
				td_verror(td, EINVAL, "fio_ioring_cmd_open_file");
			if (data->ms && !data->lba_ext && ddir != DDIR_TRIM &&
			    (o->md_per_io_size < ((td->o.max_bs[ddir] / data->lba_size) *
				log_err("%s: md_per_io_size should be at least %llu bytes\n",
					((td->o.max_bs[ddir] / data->lba_size) * data->ms));
				td_verror(td, EINVAL, "fio_ioring_cmd_open_file");

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);

static int fio_ioring_cmd_close_file(struct thread_data *td,
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;

		data = calloc(1, sizeof(struct nvme_data));
		ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);

		f->real_file_size = data->lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);

	return generic_get_file_size(td, f);
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					  enum zbd_zoned_model *model)
	return fio_nvme_get_zoned_model(td, f, model);

static int fio_ioring_cmd_report_zones(struct thread_data *td,
				       struct fio_file *f, uint64_t offset,
				       struct zbd_zone *zbdz,
				       unsigned int nr_zones)
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
	return fio_nvme_reset_wp(td, f, offset, length);

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					     unsigned int *max_open_zones)
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
				     struct fio_ruhs_info *fruhs_info)
	struct nvme_fdp_ruh_status *ruhs;

	bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
	ruhs = scalloc(1, bytes);

	ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);

	fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
	for (i = 0; i < fruhs_info->nr_ruhs; i++)
		fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
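
/*
 * Two engines are registered from this file: the regular "io_uring" engine
 * and the "io_uring_cmd" passthrough engine. As an illustrative (not
 * exhaustive) example, a job using some of the options defined above might
 * look like:
 *
 *	[uring]
 *	ioengine=io_uring
 *	iodepth=32
 *	fixedbufs=1
 *	registerfiles=1
 *	sqthread_poll=1
 */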
static struct ioengine_ops ioengine_uring = {
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
			FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_open_file,
	.close_file = fio_ioring_close_file,
	.get_file_size = generic_get_file_size,
	.option_struct_size = sizeof(struct ioring_options),
static struct ioengine_ops ioengine_uring_cmd = {
	.name = "io_uring_cmd",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
			FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_cmd_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.io_u_free = fio_ioring_io_u_free,
	.prep = fio_ioring_cmd_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_cmd_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_cmd_open_file,
	.close_file = fio_ioring_cmd_close_file,
	.get_file_size = fio_ioring_cmd_get_file_size,
	.get_zoned_model = fio_ioring_cmd_get_zoned_model,
	.report_zones = fio_ioring_cmd_report_zones,
	.reset_wp = fio_ioring_cmd_reset_wp,
	.get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
	.option_struct_size = sizeof(struct ioring_options),
	.fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
static void fio_init fio_ioring_register(void)
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);

static void fio_exit fio_ioring_unregister(void)
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);