 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
#include <sys/resource.h>

#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
	FIO_URING_CMD_NVME = 1,

	unsigned *ring_entries;

	unsigned *ring_entries;
	struct io_uring_cqe *cqes;

	struct io_u **io_u_index;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;

	struct nvme_dsm_range *dsm;

struct ioring_options {
	struct thread_data *td;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int force_async;
	unsigned int md_per_io_size;
	enum uring_cmd_type cmd_type;
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED

static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
static struct fio_option options[] = {
		.lname = "High Priority",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, hipri),
		.help = "Use polled IO completions",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Fixed (pre-mapped) IO buffers",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, fixedbufs),
		.help = "Pre-map IO buffers",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "registerfiles",
		.lname = "Register file set",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, registerfiles),
		.help = "Pre-open/register files",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "sqthread_poll",
		.lname = "Kernel SQ thread polling",
		.type = FIO_OPT_STR_SET,
		.off1 = offsetof(struct ioring_options, sqpoll_thread),
		.help = "Offload submission/completion to kernel thread",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "sqthread_poll_cpu",
		.lname = "SQ Thread Poll CPU",
		.cb = fio_ioring_sqpoll_cb,
		.help = "What CPU to run SQ thread polling on",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "nonvectored",
		.lname = "Non-vectored",
		.off1 = offsetof(struct ioring_options, nonvectored),
		.help = "Use non-vectored read/write commands",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.off1 = offsetof(struct ioring_options, uncached),
		.help = "Use RWF_UNCACHED for buffered read/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "RWF_NOWAIT",
		.type = FIO_OPT_BOOL,
		.off1 = offsetof(struct ioring_options, nowait),
		.help = "Use RWF_NOWAIT for reads/writes",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.name = "force_async",
		.lname = "Force async",
		.off1 = offsetof(struct ioring_options, force_async),
		.help = "Set IOSQE_ASYNC every N requests",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

		.lname = "Uring cmd type",
		.off1 = offsetof(struct ioring_options, cmd_type),
		.help = "Specify uring-cmd type",
			    .oval = FIO_URING_CMD_NVME,
			    .help = "Issue nvme-uring-cmd",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,

	CMDPRIO_OPTIONS(struct ioring_options, FIO_OPT_G_IOURING),

		.name = "md_per_io_size",
		.lname = "Separate Metadata Buffer Size per I/O",
		.off1 = offsetof(struct ioring_options, md_per_io_size),
		.help = "Size of separate metadata buffer per I/O (Default: 0)",
		.category = FIO_OPT_C_ENGINE,
		.group = FIO_OPT_G_IOURING,
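/*
 * Illustrative job using the options above (an assumed example, not part
 * of this engine): polled completions, pre-mapped buffers, registered
 * files and kernel-side submission. The device path is a placeholder.
 *
 *	[uring-randread]
 *	ioengine=io_uring
 *	filename=/dev/nvme0n1
 *	direct=1
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 *	hipri
 *	fixedbufs
 *	registerfiles
 *	sqthread_poll
 *	sqthread_poll_cpu=2
 */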
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
#endif
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on the actual io_u; a requeue could
			 * have changed these.
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;

		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;
		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (options prio, prioclass,
		 * and priohint) to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;

	if (o->force_async && ++ld->prepped == o->force_async) {
		sqe->flags |= IOSQE_ASYNC;

	sqe->user_data = (unsigned long) io_u;
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)
		return 0;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;

	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		sqe->flags |= IOSQE_ASYNC;

	if (o->fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index],
			&ld->dsm[io_u->index]);
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	if (cqe->res != 0)
		io_u->error = -cqe->res;
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

		if (head == atomic_load_acquire(ring->tail))
			break;
	} while (reaped + events < max);

	atomic_store_release(ring->head, head);
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;

	ld->cq_ring_off = *ring->head;
		r = fio_ioring_cqring_reap(td, events, max);

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
						IORING_ENTER_GETEVENTS);
				if (errno == EAGAIN || errno == EINTR)
					continue;
				td_verror(td, errno, "io_uring_enter");
	} while (events < min);

	return r < 0 ? r : events;
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
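/*
 * Illustrative example (assumed, not part of this file): with the cmdprio
 * options pulled in by CMDPRIO_OPTIONS() above, a job could set
 *
 *	cmdprio_percentage=10
 *	cmdprio_class=1
 *
 * so that roughly 10% of the I/Os have their priority rewritten here by
 * fio_cmdprio_set_ioprio() before the sqe is submitted.
 */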
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;

	next_tail = tail + 1;
	if (next_tail == atomic_load_relaxed(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
	struct ioring_data *ld = td->io_ops_data;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
static int fio_ioring_commit(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	/*
	 * The kernel side does the submission; we just need to check if the
	 * ring is flagged as needing a kick and, if so, call io_uring_enter().
	 * This only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.tail - ld->queued;

		flags = atomic_load_relaxed(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);

		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			io_u_mark_submit(td, ret);

			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				/* Shouldn't happen */
			td_verror(td, errno, "io_uring_enter submit");
	} while (ld->queued);
static void fio_ioring_unmap(struct ioring_data *ld)
	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);

static void fio_ioring_cleanup(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;

		if (!(td->flags & TD_F_CHILD))
			fio_ioring_unmap(ld);

		fio_cmdprio_cleanup(&ld->cmdprio);
		free(ld->io_u_index);
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
			2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
			p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
static void fio_ioring_probe(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_PROBE, p, 256);

	if (IORING_OP_WRITE > p->ops_len)
		goto out;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
static int fio_ioring_queue_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall if
		 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that
		 * time as submission latency.
		 */
		td->o.disable_slat = 1;

	/*
	 * Clamp the CQ ring size at our SQ ring size; we don't need more
	 * entries than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Set up COOP_TASKRUN as we don't need to be IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

	ret = syscall(__NR_io_uring_setup, depth, &p);
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);

	return fio_ioring_mmap(ld, &p);
static int fio_ioring_cmd_queue_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall if
		 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that
		 * time as submission latency.
		 */
		td->o.disable_slat = 1;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp the CQ ring size at our SQ ring size; we don't need more
	 * entries than that.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Set up COOP_TASKRUN as we don't need to be IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

	ret = syscall(__NR_io_uring_setup, depth, &p);
		if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
			p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
			p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
		if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
			p.flags &= ~IORING_SETUP_COOP_TASKRUN;
		if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
			p.flags &= ~IORING_SETUP_CQSIZE;

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
				IORING_REGISTER_BUFFERS, ld->iovecs, depth);

	return fio_ioring_mmap(ld, &p);
static int fio_ioring_register_files(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
			IORING_REGISTER_FILES, ld->fds, td->o.nr_files);

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		ret2 = generic_close_file(td, f);
static int fio_ioring_post_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
static int fio_ioring_cmd_post_init(struct thread_data *td)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
static int fio_ioring_init(struct thread_data *td)
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;
	unsigned long long md_size;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));

	/*
	 * Metadata buffer for nvme commands.
	 * Only iomem=malloc / mem=malloc is supported as of now.
	 */
	if (!strcmp(td->io_ops->name, "io_uring_cmd") &&
	    (o->cmd_type == FIO_URING_CMD_NVME) && o->md_per_io_size) {
		md_size = (unsigned long long) o->md_per_io_size
				* (unsigned long long) td->o.iodepth;
		md_size += page_mask + td->o.mem_align;
		if (td->o.mem_align && td->o.mem_align > page_size)
			md_size += td->o.mem_align - page_size;
		if (td->o.mem_type == MEM_MALLOC) {
			ld->md_buf = malloc(md_size);
		} else {
			log_err("fio: Only iomem=malloc or mem=malloc is supported\n");

	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");

	/*
	 * For io_uring_cmd, trims are async operations unless we are operating
	 * in zbd mode where trim means zone reset.
	 */
	if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
	    td->o.zone_mode == ZONE_MODE_ZBD)
		td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;

	ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	ld->io_u_index[io_u->index] = io_u;

	if (!strcmp(td->io_ops->name, "io_uring_cmd")) {
		p = PTR_ALIGN(ld->md_buf, page_mask) + td->o.mem_align;
		p += o->md_per_io_size * io_u->index;
		io_u->mmap_data = p;
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int lba_size = 0;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		if (!data) {
			data = calloc(1, sizeof(struct nvme_data));
			ret = fio_nvme_get_info(f, &nlba, data);

			FILE_SET_ENG_DATA(f, data);
		}

		lba_size = data->lba_ext ? data->lba_ext : data->lba_size;

		for_each_rw_ddir(ddir) {
			if (td->o.min_bs[ddir] % lba_size ||
				td->o.max_bs[ddir] % lba_size) {
				if (data->lba_ext)
					log_err("%s: block size must be a multiple of (LBA data size + Metadata size)\n",
						f->file_name);
				else
					log_err("%s: block size must be a multiple of LBA data size\n",
						f->file_name);
				td_verror(td, EINVAL, "fio_ioring_cmd_open_file");

			if (data->ms && !data->lba_ext && ddir != DDIR_TRIM &&
			    (o->md_per_io_size < ((td->o.max_bs[ddir] / data->lba_size) *
						  data->ms))) {
				log_err("%s: md_per_io_size should be at least %llu bytes\n",
					f->file_name,
					((td->o.max_bs[ddir] / data->lba_size) * data->ms));
				td_verror(td, EINVAL, "fio_ioring_cmd_open_file");

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
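/*
 * Worked example for the checks above (assuming a common NVMe format):
 * a namespace with 4096-byte data blocks and 8 bytes of metadata in
 * extended-LBA mode has lba_ext = 4104, so bs must be a multiple of 4104.
 * With separate metadata (lba_ext == 0, lba_size = 4096, ms = 8), bs stays
 * a multiple of 4096 and, at max_bs=64k, md_per_io_size must be at least
 * (65536 / 4096) * 8 = 128 bytes.
 */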
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);
static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;

		data = calloc(1, sizeof(struct nvme_data));
		ret = fio_nvme_get_info(f, &nlba, data);

		f->real_file_size = data->lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);

	return generic_get_file_size(td, f);
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					   struct fio_file *f,
					   enum zbd_zoned_model *model)
	return fio_nvme_get_zoned_model(td, f, model);

static int fio_ioring_cmd_report_zones(struct thread_data *td,
					struct fio_file *f, uint64_t offset,
					struct zbd_zone *zbdz,
					unsigned int nr_zones)
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
	return fio_nvme_reset_wp(td, f, offset, length);

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					      struct fio_file *f,
					      unsigned int *max_open_zones)
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
				     struct fio_ruhs_info *fruhs_info)
	struct nvme_fdp_ruh_status *ruhs;

	bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
	ruhs = scalloc(1, bytes);

	ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);

	fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
	for (i = 0; i < fruhs_info->nr_ruhs; i++)
		fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
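/*
 * Illustrative io_uring_cmd job (an assumed example, not part of this
 * file): passthrough commands are issued to the NVMe character device
 * rather than the block device, so the filename below is a placeholder
 * for the matching /dev/ngXnY node.
 *
 *	[uring-cmd-randwrite]
 *	ioengine=io_uring_cmd
 *	cmd_type=nvme
 *	filename=/dev/ng0n1
 *	rw=randwrite
 *	bs=4k
 *	iodepth=32
 */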
static struct ioengine_ops ioengine_uring = {
	.name = "io_uring",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
		 FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_open_file,
	.close_file = fio_ioring_close_file,
	.get_file_size = generic_get_file_size,
	.option_struct_size = sizeof(struct ioring_options),
static struct ioengine_ops ioengine_uring_cmd = {
	.name = "io_uring_cmd",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
		 FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_cmd_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_cmd_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_cmd_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_cmd_open_file,
	.close_file = fio_ioring_cmd_close_file,
	.get_file_size = fio_ioring_cmd_get_file_size,
	.get_zoned_model = fio_ioring_cmd_get_zoned_model,
	.report_zones = fio_ioring_cmd_report_zones,
	.reset_wp = fio_ioring_cmd_reset_wp,
	.get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
	.option_struct_size = sizeof(struct ioring_options),
	.fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
static void fio_init fio_ioring_register(void)
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);

static void fio_exit fio_ioring_unregister(void)
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);