 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
#include <sys/resource.h>

#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
        FIO_URING_CMD_NVME = 1,

        unsigned *ring_entries;

        unsigned *ring_entries;
        struct io_uring_cqe *cqes;

        struct io_u **io_u_index;

        struct io_sq_ring sq_ring;
        struct io_uring_sqe *sqes;
        unsigned sq_ring_mask;

        struct io_cq_ring cq_ring;
        unsigned cq_ring_mask;

        struct ioring_mmap mmap[3];

        struct cmdprio cmdprio;

        struct nvme_dsm_range *dsm;
struct ioring_options {
        struct thread_data *td;
        struct cmdprio_options cmdprio_options;
        unsigned int fixedbufs;
        unsigned int registerfiles;
        unsigned int sqpoll_thread;
        unsigned int sqpoll_set;
        unsigned int sqpoll_cpu;
        unsigned int nonvectored;
        unsigned int uncached;
        unsigned int force_async;
        unsigned int md_per_io_size;
        unsigned int apptag_mask;
        enum uring_cmd_type cmd_type;
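
/*
 * Lookup tables mapping an fio data direction to an io_uring opcode: the
 * second index of ddir_to_op selects the non-vectored IORING_OP_READ/
 * IORING_OP_WRITE variants when the "nonvectored" option is set, while
 * fixed_ddir_to_op is used with pre-registered (fixedbufs) buffers.
 */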
static const int ddir_to_op[2][2] = {
        { IORING_OP_READV, IORING_OP_READ },
        { IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
        IORING_OP_READ_FIXED,
        IORING_OP_WRITE_FIXED
};
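
/*
 * Option callback for "sqthread_poll_cpu": record the requested CPU so
 * that queue setup can set IORING_SETUP_SQ_AFF and pin the kernel SQ
 * poll thread to it.
 */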
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
        struct ioring_options *o = data;

        o->sqpoll_cpu = *val;
static struct fio_option options[] = {
        .lname = "High Priority",
        .type = FIO_OPT_STR_SET,
        .off1 = offsetof(struct ioring_options, hipri),
        .help = "Use polled IO completions",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .lname = "Fixed (pre-mapped) IO buffers",
        .type = FIO_OPT_STR_SET,
        .off1 = offsetof(struct ioring_options, fixedbufs),
        .help = "Pre map IO buffers",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .name = "registerfiles",
        .lname = "Register file set",
        .type = FIO_OPT_STR_SET,
        .off1 = offsetof(struct ioring_options, registerfiles),
        .help = "Pre-open/register files",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .name = "sqthread_poll",
        .lname = "Kernel SQ thread polling",
        .type = FIO_OPT_STR_SET,
        .off1 = offsetof(struct ioring_options, sqpoll_thread),
        .help = "Offload submission/completion to kernel thread",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .name = "sqthread_poll_cpu",
        .lname = "SQ Thread Poll CPU",
        .cb = fio_ioring_sqpoll_cb,
        .help = "What CPU to run SQ thread polling on",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .name = "nonvectored",
        .lname = "Non-vectored",
        .off1 = offsetof(struct ioring_options, nonvectored),
        .help = "Use non-vectored read/write commands",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .off1 = offsetof(struct ioring_options, uncached),
        .help = "Use RWF_UNCACHED for buffered read/writes",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .lname = "RWF_NOWAIT",
        .type = FIO_OPT_BOOL,
        .off1 = offsetof(struct ioring_options, nowait),
        .help = "Use RWF_NOWAIT for reads/writes",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .name = "force_async",
        .lname = "Force async",
        .off1 = offsetof(struct ioring_options, force_async),
        .help = "Set IOSQE_ASYNC every N requests",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .lname = "Uring cmd type",
        .off1 = offsetof(struct ioring_options, cmd_type),
        .help = "Specify uring-cmd type",
        .oval = FIO_URING_CMD_NVME,
        .help = "Issue nvme-uring-cmd",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),

        .name = "md_per_io_size",
        .lname = "Separate Metadata Buffer Size per I/O",
        .off1 = offsetof(struct ioring_options, md_per_io_size),
        .help = "Size of separate metadata buffer per I/O (Default: 0)",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .lname = "Protection Information Action",
        .type = FIO_OPT_BOOL,
        .off1 = offsetof(struct ioring_options, pi_act),
        .help = "Protection Information Action bit (pi_act=1 or pi_act=0)",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .lname = "Protection Information Check",
        .type = FIO_OPT_STR_STORE,
        .off1 = offsetof(struct ioring_options, pi_chk),
        .help = "Control of Protection Information Checking (pi_chk=GUARD,REFTAG,APPTAG)",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .lname = "Application Tag used in Protection Information",
        .off1 = offsetof(struct ioring_options, apptag),
        .help = "Application Tag used in Protection Information field (Default: 0x1234)",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,

        .name = "apptag_mask",
        .lname = "Application Tag Mask",
        .off1 = offsetof(struct ioring_options, apptag_mask),
        .help = "Application Tag Mask used with Application Tag (Default: 0xffff)",
        .category = FIO_OPT_C_ENGINE,
        .group = FIO_OPT_G_IOURING,
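
/*
 * Thin wrapper around the io_uring_enter(2) system call, used both for
 * submitting SQ entries and for waiting on completions.
 */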
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
                          unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
        return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
                             min_complete, flags, NULL, 0);
#else
        return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
                       min_complete, flags, NULL, 0);
#endif
}
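
/*
 * Fill in the SQE for a regular io_u: read/write in its vectored,
 * non-vectored or fixed-buffer variants, plus the fsync and
 * sync_file_range cases.
 */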
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct io_uring_sqe *sqe;

        sqe = &ld->sqes[io_u->index];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;

        if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
                sqe->opcode = fixed_ddir_to_op[io_u->ddir];
                sqe->addr = (unsigned long) io_u->xfer_buf;
                sqe->len = io_u->xfer_buflen;
                sqe->buf_index = io_u->index;

                struct iovec *iov = &ld->iovecs[io_u->index];

                /*
                 * Update based on actual io_u, requeue could have
                 * changed it
                 */
                iov->iov_base = io_u->xfer_buf;
                iov->iov_len = io_u->xfer_buflen;

                sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
                if (o->nonvectored) {
                        sqe->addr = (unsigned long) iov->iov_base;
                        sqe->len = iov->iov_len;
                sqe->addr = (unsigned long) iov;

                if (!td->o.odirect && o->uncached)
                        sqe->rw_flags |= RWF_UNCACHED;
                sqe->rw_flags |= RWF_NOWAIT;

                /*
                 * Since io_uring can have a submission context (sqthread_poll)
                 * that is different from the process context, we cannot rely on
                 * the IO priority set by ioprio_set() (options prio, prioclass,
                 * and priohint) to be inherited.
                 * td->ioprio will have the value of the "default prio", so set
                 * this unconditionally. This value might get overridden by
                 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
                 * cmdprio_bssplit is used.
                 */
                sqe->ioprio = td->ioprio;
                sqe->off = io_u->offset;
        } else if (ddir_sync(io_u->ddir)) {
                if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
                        sqe->off = f->first_write;
                        sqe->len = f->last_write - f->first_write;
                        sqe->sync_range_flags = td->o.sync_file_range;
                        sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
                if (io_u->ddir == DDIR_DATASYNC)
                        sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
                sqe->opcode = IORING_OP_FSYNC;

        if (o->force_async && ++ld->prepped == o->force_async) {
                sqe->flags |= IOSQE_ASYNC;

        sqe->user_data = (unsigned long) io_u;
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct fio_file *f = io_u->file;
        struct nvme_uring_cmd *cmd;
        struct io_uring_sqe *sqe;

        /* only supports nvme_uring_cmd */
        if (o->cmd_type != FIO_URING_CMD_NVME)

        if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)

        sqe = &ld->sqes[(io_u->index) << 1];

        if (o->registerfiles) {
                sqe->fd = f->engine_pos;
                sqe->flags = IOSQE_FIXED_FILE;

        if (!td->o.odirect && o->uncached)
                sqe->rw_flags |= RWF_UNCACHED;
        sqe->rw_flags |= RWF_NOWAIT;

        sqe->opcode = IORING_OP_URING_CMD;
        sqe->user_data = (unsigned long) io_u;
        sqe->cmd_op = NVME_URING_CMD_IO;
        sqe->cmd_op = NVME_URING_CMD_IO_VEC;
        if (o->force_async && ++ld->prepped == o->force_async) {
                sqe->flags |= IOSQE_ASYNC;

        sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
        sqe->buf_index = io_u->index;

        cmd = (struct nvme_uring_cmd *)sqe->cmd;
        return fio_nvme_uring_cmd_prep(cmd, io_u,
                        o->nonvectored ? NULL : &ld->iovecs[io_u->index],
                        &ld->dsm[io_u->index]);
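
/*
 * Turn a reaped CQE back into its io_u. A result that does not match the
 * requested transfer length is either an error (negative res) or a short
 * transfer accounted in resid.
 */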
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
        struct ioring_data *ld = td->io_ops_data;
        struct io_uring_cqe *cqe;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        if (cqe->res != io_u->xfer_buflen) {
                if (cqe->res > io_u->xfer_buflen)
                        io_u->error = -cqe->res;
                else
                        io_u->resid = io_u->xfer_buflen - cqe->res;
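
/*
 * Completion handling for passthrough (io_uring_cmd) requests; a
 * non-zero CQE result is reported back as an error on the io_u.
 */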
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_cqe *cqe;

        index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
        if (o->cmd_type == FIO_URING_CMD_NVME)

        cqe = &ld->cq_ring.cqes[index];
        io_u = (struct io_u *) (uintptr_t) cqe->user_data;

        io_u->error = -cqe->res;
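
/*
 * Reap completed CQEs straight from the shared CQ ring, then publish the
 * new head with a release store once the entries have been consumed.
 */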
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
                                  unsigned int max)
        struct ioring_data *ld = td->io_ops_data;
        struct io_cq_ring *ring = &ld->cq_ring;
        unsigned head, reaped = 0;

                if (head == atomic_load_acquire(ring->tail))
        } while (reaped + events < max);

        atomic_store_release(ring->head, head);
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
                                unsigned int max, const struct timespec *t)
        struct ioring_data *ld = td->io_ops_data;
        unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
        struct ioring_options *o = td->eo;
        struct io_cq_ring *ring = &ld->cq_ring;

        ld->cq_ring_off = *ring->head;
                r = fio_ioring_cqring_reap(td, events, max);

                if (!o->sqpoll_thread) {
                        r = io_uring_enter(ld, 0, actual_min,
                                           IORING_ENTER_GETEVENTS);
                                if (errno == EAGAIN || errno == EINTR)
                                td_verror(td, errno, "io_uring_enter");
        } while (events < min);

        return r < 0 ? r : events;
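
/*
 * For NVMe passthrough I/O with protection information, fill in the PI
 * fields (PRACT, check flags, apptag and apptag mask) of the prepared
 * command.
 */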
static inline void fio_ioring_cmd_nvme_pi(struct thread_data *td,
                                          struct io_u *io_u)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct nvme_uring_cmd *cmd;
        struct io_uring_sqe *sqe;
        struct nvme_cmd_ext_io_opts ext_opts = {0};
        struct nvme_data *data = FILE_ENG_DATA(io_u->file);

        if (io_u->ddir == DDIR_TRIM)

        sqe = &ld->sqes[(io_u->index) << 1];
        cmd = (struct nvme_uring_cmd *)sqe->cmd;

        ext_opts.io_flags |= NVME_IO_PRINFO_PRACT;
        ext_opts.io_flags |= o->prchk;
        ext_opts.apptag = o->apptag;
        ext_opts.apptag_mask = o->apptag_mask;

        fio_nvme_pi_fill(cmd, io_u, &ext_opts);
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
                                           struct io_u *io_u)
        struct ioring_data *ld = td->io_ops_data;
        struct cmdprio *cmdprio = &ld->cmdprio;

        if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
                ld->sqes[io_u->index].ioprio = io_u->ioprio;
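
/*
 * Queue an io_u: its index is written into the SQ ring array and the new
 * tail is published with a release store so the kernel can see it.
 */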
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
                                          struct io_u *io_u)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_sq_ring *ring = &ld->sq_ring;
        unsigned tail, next_tail;

        fio_ro_check(td, io_u);

        if (ld->queued == ld->iodepth)

        if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
                do_io_u_trim(td, io_u);

                io_u_mark_submit(td, 1);
                io_u_mark_complete(td, 1);
                return FIO_Q_COMPLETED;

        next_tail = tail + 1;
        if (next_tail == atomic_load_relaxed(ring->head))

        if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
                fio_ioring_cmdprio_prep(td, io_u);

        if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
            o->cmd_type == FIO_URING_CMD_NVME)
                fio_ioring_cmd_nvme_pi(td, io_u);

        ring->array[tail & ld->sq_ring_mask] = io_u->index;
        atomic_store_release(ring->tail, next_tail);
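
/*
 * Stamp issue times on the io_us that were just handed to the kernel;
 * needed because this engine sets FIO_ASYNCIO_SETS_ISSUE_TIME.
 */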
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
        struct ioring_data *ld = td->io_ops_data;

        if (!fio_fill_issue_time(td))

        fio_gettime(&now, NULL);

                struct io_sq_ring *ring = &ld->sq_ring;
                int index = ring->array[start & ld->sq_ring_mask];
                struct io_u *io_u = ld->io_u_index[index];

                memcpy(&io_u->issue_time, &now, sizeof(now));
                io_u_queued(td, io_u);

        /*
         * only used for iolog
         */
        if (td->o.read_iolog_file)
                memcpy(&td->last_issue, &now, sizeof(now));
static int fio_ioring_commit(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        /*
         * Kernel side does submission. Just need to check if the ring is
         * flagged as needing a kick, if so, call io_uring_enter(). This
         * only happens if we've been idle too long.
         */
        if (o->sqpoll_thread) {
                struct io_sq_ring *ring = &ld->sq_ring;
                unsigned start = *ld->sq_ring.tail - ld->queued;

                flags = atomic_load_relaxed(ring->flags);
                if (flags & IORING_SQ_NEED_WAKEUP)
                        io_uring_enter(ld, ld->queued, 0,
                                       IORING_ENTER_SQ_WAKEUP);
                fio_ioring_queued(td, start, ld->queued);
                io_u_mark_submit(td, ld->queued);

                unsigned start = *ld->sq_ring.head;
                long nr = ld->queued;

                ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
                        fio_ioring_queued(td, start, ret);
                        io_u_mark_submit(td, ret);

                        io_u_mark_submit(td, ret);
                        if (errno == EAGAIN || errno == EINTR) {
                                ret = fio_ioring_cqring_reap(td, 0, ld->queued);
                                /* Shouldn't happen */
                        td_verror(td, errno, "io_uring_enter submit");
        } while (ld->queued);
static void fio_ioring_unmap(struct ioring_data *ld)
        for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
                munmap(ld->mmap[i].ptr, ld->mmap[i].len);

static void fio_ioring_cleanup(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;

        if (!(td->flags & TD_F_CHILD))
                fio_ioring_unmap(ld);

        fio_cmdprio_cleanup(&ld->cmdprio);
        free(ld->io_u_index);
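
/*
 * mmap() the three shared regions (SQ ring, SQE array, CQ ring) and
 * resolve the ring pointers from the offsets returned by
 * io_uring_setup(2).
 */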
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
        struct io_sq_ring *sring = &ld->sq_ring;
        struct io_cq_ring *cring = &ld->cq_ring;

        ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
        ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
        ld->mmap[0].ptr = ptr;
        sring->head = ptr + p->sq_off.head;
        sring->tail = ptr + p->sq_off.tail;
        sring->ring_mask = ptr + p->sq_off.ring_mask;
        sring->ring_entries = ptr + p->sq_off.ring_entries;
        sring->flags = ptr + p->sq_off.flags;
        sring->array = ptr + p->sq_off.array;
        ld->sq_ring_mask = *sring->ring_mask;

        if (p->flags & IORING_SETUP_SQE128)
                ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
        else
                ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
        ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
                        MAP_SHARED | MAP_POPULATE, ld->ring_fd,
        ld->mmap[1].ptr = ld->sqes;

        if (p->flags & IORING_SETUP_CQE32) {
                ld->mmap[2].len = p->cq_off.cqes +
                        2 * p->cq_entries * sizeof(struct io_uring_cqe);
        } else {
                ld->mmap[2].len = p->cq_off.cqes +
                        p->cq_entries * sizeof(struct io_uring_cqe);
        }
        ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
        ld->mmap[2].ptr = ptr;
        cring->head = ptr + p->cq_off.head;
        cring->tail = ptr + p->cq_off.tail;
        cring->ring_mask = ptr + p->cq_off.ring_mask;
        cring->ring_entries = ptr + p->cq_off.ring_entries;
        cring->cqes = ptr + p->cq_off.cqes;
        ld->cq_ring_mask = *cring->ring_mask;
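
/*
 * Probe the kernel for IORING_OP_READ/IORING_OP_WRITE support and turn
 * on non-vectored I/O by default when available, unless the user already
 * set the option explicitly.
 */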
static void fio_ioring_probe(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        struct io_uring_probe *p;

        /* already set by user, don't touch */
        if (o->nonvectored != -1)

        /* default to off, as that's always safe */

        p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_PROBE, p, 256);

        if (IORING_OP_WRITE > p->ops_len)

        if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
            (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
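
/*
 * Create the ring for the regular engine, falling back gracefully when
 * an older kernel rejects newer setup flags (DEFER_TASKRUN, COOP_TASKRUN,
 * CQSIZE) with EINVAL.
 */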
static int fio_ioring_queue_init(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));

        p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                p.flags |= IORING_SETUP_SQ_AFF;
                p.sq_thread_cpu = o->sqpoll_cpu;

                /*
                 * Submission latency for sqpoll_thread is just the time it
                 * takes to fill in the SQ ring entries, and any syscall if
                 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
                 */
                td->o.disable_slat = 1;

        /*
         * Clamp CQ ring size at our SQ ring size, we don't need more entries
         */
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;

        /*
         * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
         * completing IO operations.
         */
        p.flags |= IORING_SETUP_COOP_TASKRUN;

        /*
         * io_uring is always a single issuer, and we can defer task_work
         * runs until we reap events.
         */
        p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
                p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
                p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
        if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
                p.flags &= ~IORING_SETUP_COOP_TASKRUN;
        if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                p.flags &= ~IORING_SETUP_CQSIZE;

        fio_ioring_probe(td);

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_BUFFERS, ld->iovecs, depth);

        return fio_ioring_mmap(ld, &p);
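
/*
 * Ring setup for the passthrough engine; NVMe uring commands need the
 * big SQE (128 byte) and CQE (32 byte) ring formats.
 */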
static int fio_ioring_cmd_queue_init(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;
        int depth = td->o.iodepth;
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));

        p.flags |= IORING_SETUP_IOPOLL;
        if (o->sqpoll_thread) {
                p.flags |= IORING_SETUP_SQPOLL;
                p.flags |= IORING_SETUP_SQ_AFF;
                p.sq_thread_cpu = o->sqpoll_cpu;

                /*
                 * Submission latency for sqpoll_thread is just the time it
                 * takes to fill in the SQ ring entries, and any syscall if
                 * IORING_SQ_NEED_WAKEUP is set, we don't need to log that time
                 */
                td->o.disable_slat = 1;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                p.flags |= IORING_SETUP_SQE128;
                p.flags |= IORING_SETUP_CQE32;

        /*
         * Clamp CQ ring size at our SQ ring size, we don't need more entries
         */
        p.flags |= IORING_SETUP_CQSIZE;
        p.cq_entries = depth;

        /*
         * Setup COOP_TASKRUN as we don't need to get IPI interrupted for
         * completing IO operations.
         */
        p.flags |= IORING_SETUP_COOP_TASKRUN;

        /*
         * io_uring is always a single issuer, and we can defer task_work
         * runs until we reap events.
         */
        p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

        ret = syscall(__NR_io_uring_setup, depth, &p);
        if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
                p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
                p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
        if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
                p.flags &= ~IORING_SETUP_COOP_TASKRUN;
        if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
                p.flags &= ~IORING_SETUP_CQSIZE;

        fio_ioring_probe(td);

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_BUFFERS, ld->iovecs, depth);

        return fio_ioring_mmap(ld, &p);
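
/*
 * Open all files up front and register the fd set with
 * IORING_REGISTER_FILES, so SQEs can reference files by index via
 * IOSQE_FIXED_FILE.
 */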
static int fio_ioring_register_files(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;

        ld->fds = calloc(td->o.nr_files, sizeof(int));

        for_each_file(td, f, i) {
                ret = generic_open_file(td, f);

        ret = syscall(__NR_io_uring_register, ld->ring_fd,
                      IORING_REGISTER_FILES, ld->fds, td->o.nr_files);

        /*
         * Pretend the file is closed again, and really close it if we hit
         * an error.
         */
        for_each_file(td, f, i) {
                int fio_unused ret2;

                ret2 = generic_close_file(td, f);
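
/*
 * Second init stage, run once io_u buffers exist: seed the iovec table,
 * create the ring, and optionally register buffers and files.
 */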
static int fio_ioring_post_init(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);

        err = fio_ioring_queue_init(td);
                int init_err = errno;

                if (init_err == ENOSYS)
                        log_err("fio: your kernel doesn't support io_uring\n");
                td_verror(td, init_err, "io_queue_init");

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                memset(sqe, 0, sizeof(*sqe));

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                        td_verror(td, errno, "ioring_register_files");
static int fio_ioring_cmd_post_init(struct thread_data *td)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        for (i = 0; i < td->o.iodepth; i++) {
                struct iovec *iov = &ld->iovecs[i];

                io_u = ld->io_u_index[i];
                iov->iov_base = io_u->buf;
                iov->iov_len = td_max_bs(td);

        err = fio_ioring_cmd_queue_init(td);
                int init_err = errno;

                td_verror(td, init_err, "io_queue_init");

        for (i = 0; i < td->o.iodepth; i++) {
                struct io_uring_sqe *sqe;

                if (o->cmd_type == FIO_URING_CMD_NVME) {
                        sqe = &ld->sqes[i << 1];
                        memset(sqe, 0, 2 * sizeof(*sqe));
                        memset(sqe, 0, sizeof(*sqe));

        if (o->registerfiles) {
                err = fio_ioring_register_files(td);
                        td_verror(td, errno, "ioring_register_files");
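
/*
 * Translate the pi_chk=GUARD,REFTAG,APPTAG option string into the
 * corresponding NVME_IO_PRINFO_PRCHK_* flags.
 */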
static void parse_prchk_flags(struct ioring_options *o)
        if (strstr(o->pi_chk, "GUARD") != NULL)
                o->prchk = NVME_IO_PRINFO_PRCHK_GUARD;
        if (strstr(o->pi_chk, "REFTAG") != NULL)
                o->prchk |= NVME_IO_PRINFO_PRCHK_REF;
        if (strstr(o->pi_chk, "APPTAG") != NULL)
                o->prchk |= NVME_IO_PRINFO_PRCHK_APP;
static int fio_ioring_init(struct thread_data *td)
        struct ioring_options *o = td->eo;
        struct ioring_data *ld;
        unsigned long long md_size;

        /* sqthread submission requires registered files */
        if (o->sqpoll_thread)
                o->registerfiles = 1;

        if (o->registerfiles && td->o.nr_files != td->o.open_files) {
                log_err("fio: io_uring registered files require nr_files to "
                        "be identical to open_files\n");

        ld = calloc(1, sizeof(*ld));

        /* ring depth must be a power-of-2 */
        ld->iodepth = td->o.iodepth;
        td->o.iodepth = roundup_pow2(td->o.iodepth);

        ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));

        /*
         * metadata buffer for nvme command.
         * We are only supporting iomem=malloc / mem=malloc as of now.
         */
        if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
            (o->cmd_type == FIO_URING_CMD_NVME) && o->md_per_io_size) {
                md_size = (unsigned long long) o->md_per_io_size
                                * (unsigned long long) td->o.iodepth;
                md_size += page_mask + td->o.mem_align;
                if (td->o.mem_align && td->o.mem_align > page_size)
                        md_size += td->o.mem_align - page_size;
                if (td->o.mem_type == MEM_MALLOC) {
                        ld->md_buf = malloc(md_size);
                        log_err("fio: Only iomem=malloc or mem=malloc is supported\n");

        parse_prchk_flags(o);

        ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

        td->io_ops_data = ld;

        ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
                td_verror(td, EINVAL, "fio_ioring_init");

        /*
         * For io_uring_cmd, trims are async operations unless we are operating
         * in zbd mode where trim means zone reset.
         */
        if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
            td->o.zone_mode == ZONE_MODE_ZBD)
                td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;

        ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        ld->io_u_index[io_u->index] = io_u;

        if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
                p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
                p += o->md_per_io_size * io_u->index;
                io_u->mmap_data = p;
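
/*
 * With registerfiles, files were already opened and registered during
 * init; just hand back the stored descriptor for this file's slot.
 */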
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];

static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;
                unsigned int lba_size = 0;

                /* Store the namespace-id and lba size. */
                data = FILE_ENG_DATA(f);
                        data = calloc(1, sizeof(struct nvme_data));
                        ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);

                        FILE_SET_ENG_DATA(f, data);

                lba_size = data->lba_ext ? data->lba_ext : data->lba_size;

                for_each_rw_ddir(ddir) {
                        if (td->o.min_bs[ddir] % lba_size ||
                            td->o.max_bs[ddir] % lba_size) {
                                        log_err("%s: block size must be a multiple of (LBA data size + Metadata size)\n",
                                        log_err("%s: block size must be a multiple of LBA data size\n",
                                td_verror(td, EINVAL, "fio_ioring_cmd_open_file");

                        if (data->ms && !data->lba_ext && ddir != DDIR_TRIM &&
                            (o->md_per_io_size < ((td->o.max_bs[ddir] / data->lba_size) *
                                log_err("%s: md_per_io_size should be at least %llu bytes\n",
                                        ((td->o.max_bs[ddir] / data->lba_size) * data->ms));
                                td_verror(td, EINVAL, "fio_ioring_cmd_open_file");

        if (!ld || !o->registerfiles)
                return generic_open_file(td, f);

        f->fd = ld->fds[f->engine_pos];
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

static int fio_ioring_cmd_close_file(struct thread_data *td,
                                     struct fio_file *f)
        struct ioring_data *ld = td->io_ops_data;
        struct ioring_options *o = td->eo;

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = FILE_ENG_DATA(f);

                FILE_SET_ENG_DATA(f, NULL);

        if (!ld || !o->registerfiles)
                return generic_close_file(td, f);

static int fio_ioring_cmd_get_file_size(struct thread_data *td,
                                        struct fio_file *f)
        struct ioring_options *o = td->eo;

        if (fio_file_size_known(f))

        if (o->cmd_type == FIO_URING_CMD_NVME) {
                struct nvme_data *data = NULL;

                data = calloc(1, sizeof(struct nvme_data));
                ret = fio_nvme_get_info(f, &nlba, o->pi_act, data);

                f->real_file_size = data->lba_size * nlba;
                fio_file_set_size_known(f);

                FILE_SET_ENG_DATA(f, data);

        return generic_get_file_size(td, f);
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
                                          struct fio_file *f,
                                          enum zbd_zoned_model *model)
        return fio_nvme_get_zoned_model(td, f, model);

static int fio_ioring_cmd_report_zones(struct thread_data *td,
                                        struct fio_file *f, uint64_t offset,
                                        struct zbd_zone *zbdz,
                                        unsigned int nr_zones)
        return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
                                   uint64_t offset, uint64_t length)
        return fio_nvme_reset_wp(td, f, offset, length);

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
                                              struct fio_file *f,
                                              unsigned int *max_open_zones)
        return fio_nvme_get_max_open_zones(td, f, max_open_zones);
static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
                                     struct fio_ruhs_info *fruhs_info)
        struct nvme_fdp_ruh_status *ruhs;

        bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
        ruhs = scalloc(1, bytes);

        ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);

        fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
        for (i = 0; i < fruhs_info->nr_ruhs; i++)
                fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
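
/*
 * Engine registration: the classic io_uring engine and the io_uring_cmd
 * passthrough variant share most of their plumbing.
 */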
static struct ioengine_ops ioengine_uring = {
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
                 FIO_ASYNCIO_SETS_ISSUE_TIME,
        .init = fio_ioring_init,
        .post_init = fio_ioring_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .prep = fio_ioring_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_open_file,
        .close_file = fio_ioring_close_file,
        .get_file_size = generic_get_file_size,
        .option_struct_size = sizeof(struct ioring_options),

static struct ioengine_ops ioengine_uring_cmd = {
        .name = "io_uring_cmd",
        .version = FIO_IOOPS_VERSION,
        .flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
                 FIO_ASYNCIO_SETS_ISSUE_TIME,
        .init = fio_ioring_init,
        .post_init = fio_ioring_cmd_post_init,
        .io_u_init = fio_ioring_io_u_init,
        .prep = fio_ioring_cmd_prep,
        .queue = fio_ioring_queue,
        .commit = fio_ioring_commit,
        .getevents = fio_ioring_getevents,
        .event = fio_ioring_cmd_event,
        .cleanup = fio_ioring_cleanup,
        .open_file = fio_ioring_cmd_open_file,
        .close_file = fio_ioring_cmd_close_file,
        .get_file_size = fio_ioring_cmd_get_file_size,
        .get_zoned_model = fio_ioring_cmd_get_zoned_model,
        .report_zones = fio_ioring_cmd_report_zones,
        .reset_wp = fio_ioring_cmd_reset_wp,
        .get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
        .option_struct_size = sizeof(struct ioring_options),
        .fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,

static void fio_init fio_ioring_register(void)
        register_ioengine(&ioengine_uring);
        register_ioengine(&ioengine_uring_cmd);

static void fio_exit fio_ioring_unregister(void)
        unregister_ioengine(&ioengine_uring);
        unregister_ioengine(&ioengine_uring_cmd);