 * IO engine using the new native Linux aio io_uring interface. See:
 *
 * http://git.kernel.dk/cgit/linux-block/log/?h=io_uring
#include <sys/resource.h>

#include "../lib/pow2.h"
#include "../optgroup.h"
#include "../lib/memalign.h"
#include "../lib/fls.h"
#include "../lib/roundup.h"

#ifdef ARCH_HAVE_IOURING

#include "../lib/types.h"
#include "../os/linux/io_uring.h"
enum uring_cmd_type {
	FIO_URING_CMD_NVME = 1,
};
struct io_sq_ring {
	unsigned *ring_entries;
};

struct io_cq_ring {
	unsigned *ring_entries;
	struct io_uring_cqe *cqes;
};
struct ioring_data {
	struct io_u **io_u_index;

	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	unsigned sq_ring_mask;

	struct io_cq_ring cq_ring;
	unsigned cq_ring_mask;

	struct ioring_mmap mmap[3];

	struct cmdprio cmdprio;

	struct nvme_dsm_range *dsm;
};
struct ioring_options {
	struct thread_data *td;
	struct cmdprio_options cmdprio_options;
	unsigned int fixedbufs;
	unsigned int registerfiles;
	unsigned int sqpoll_thread;
	unsigned int sqpoll_set;
	unsigned int sqpoll_cpu;
	unsigned int nonvectored;
	unsigned int uncached;
	unsigned int force_async;
	enum uring_cmd_type cmd_type;
};
static const int ddir_to_op[2][2] = {
	{ IORING_OP_READV, IORING_OP_READ },
	{ IORING_OP_WRITEV, IORING_OP_WRITE }
};

static const int fixed_ddir_to_op[2] = {
	IORING_OP_READ_FIXED,
	IORING_OP_WRITE_FIXED
};
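/*
 * Illustrative sketch, not part of the engine proper: how the two lookup
 * tables above are meant to be consumed when an SQE is prepared. ddir_to_op
 * is indexed by data direction and by whether non-vectored commands were
 * requested; fixed_ddir_to_op is used instead when fixedbufs (pre-registered
 * buffers) is enabled, as in fio_ioring_prep() further down.
 */
#if 0
	if (o->fixedbufs)
		sqe->opcode = fixed_ddir_to_op[io_u->ddir];
	else
		sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
#endif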
static int fio_ioring_sqpoll_cb(void *data, unsigned long long *val)
{
	struct ioring_options *o = data;

	o->sqpoll_cpu = *val;
static struct fio_option options[] = {
	.lname = "High Priority",
	.type = FIO_OPT_STR_SET,
	.off1 = offsetof(struct ioring_options, hipri),
	.help = "Use polled IO completions",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

#ifdef FIO_HAVE_IOPRIO_CLASS
	.name = "cmdprio_percentage",
	.lname = "high priority percentage",
	.off1 = offsetof(struct ioring_options, cmdprio_options.percentage[DDIR_READ]),
	.off2 = offsetof(struct ioring_options, cmdprio_options.percentage[DDIR_WRITE]),
	.help = "Send high priority I/O this percentage of the time",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "cmdprio_class",
	.lname = "Asynchronous I/O priority class",
	.off1 = offsetof(struct ioring_options, cmdprio_options.class[DDIR_READ]),
	.off2 = offsetof(struct ioring_options, cmdprio_options.class[DDIR_WRITE]),
	.help = "Set asynchronous IO priority class",
	.minval = IOPRIO_MIN_PRIO_CLASS + 1,
	.maxval = IOPRIO_MAX_PRIO_CLASS,
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.lname = "Asynchronous I/O priority level",
	.off1 = offsetof(struct ioring_options, cmdprio_options.level[DDIR_READ]),
	.off2 = offsetof(struct ioring_options, cmdprio_options.level[DDIR_WRITE]),
	.help = "Set asynchronous IO priority level",
	.minval = IOPRIO_MIN_PRIO,
	.maxval = IOPRIO_MAX_PRIO,
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "cmdprio_bssplit",
	.lname = "Priority percentage block size split",
	.type = FIO_OPT_STR_STORE,
	.off1 = offsetof(struct ioring_options, cmdprio_options.bssplit_str),
	.help = "Set priority percentages for different block sizes",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,
#else
	.name = "cmdprio_percentage",
	.lname = "high priority percentage",
	.type = FIO_OPT_UNSUPPORTED,
	.help = "Your platform does not support I/O priority classes",

	.name = "cmdprio_class",
	.lname = "Asynchronous I/O priority class",
	.type = FIO_OPT_UNSUPPORTED,
	.help = "Your platform does not support I/O priority classes",

	.lname = "Asynchronous I/O priority level",
	.type = FIO_OPT_UNSUPPORTED,
	.help = "Your platform does not support I/O priority classes",

	.name = "cmdprio_bssplit",
	.lname = "Priority percentage block size split",
	.type = FIO_OPT_UNSUPPORTED,
	.help = "Your platform does not support I/O priority classes",
#endif

	.lname = "Fixed (pre-mapped) IO buffers",
	.type = FIO_OPT_STR_SET,
	.off1 = offsetof(struct ioring_options, fixedbufs),
	.help = "Pre map IO buffers",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "registerfiles",
	.lname = "Register file set",
	.type = FIO_OPT_STR_SET,
	.off1 = offsetof(struct ioring_options, registerfiles),
	.help = "Pre-open/register files",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "sqthread_poll",
	.lname = "Kernel SQ thread polling",
	.type = FIO_OPT_STR_SET,
	.off1 = offsetof(struct ioring_options, sqpoll_thread),
	.help = "Offload submission/completion to kernel thread",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "sqthread_poll_cpu",
	.lname = "SQ Thread Poll CPU",
	.cb = fio_ioring_sqpoll_cb,
	.help = "What CPU to run SQ thread polling on",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "nonvectored",
	.lname = "Non-vectored",
	.off1 = offsetof(struct ioring_options, nonvectored),
	.help = "Use non-vectored read/write commands",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.off1 = offsetof(struct ioring_options, uncached),
	.help = "Use RWF_UNCACHED for buffered read/writes",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.lname = "RWF_NOWAIT",
	.type = FIO_OPT_BOOL,
	.off1 = offsetof(struct ioring_options, nowait),
	.help = "Use RWF_NOWAIT for reads/writes",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.name = "force_async",
	.lname = "Force async",
	.off1 = offsetof(struct ioring_options, force_async),
	.help = "Set IOSQE_ASYNC every N requests",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,

	.lname = "Uring cmd type",
	.off1 = offsetof(struct ioring_options, cmd_type),
	.help = "Specify uring-cmd type",
		.oval = FIO_URING_CMD_NVME,
		.help = "Issue nvme-uring-cmd",
	.category = FIO_OPT_C_ENGINE,
	.group = FIO_OPT_G_IOURING,
};
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
#ifdef FIO_ARCH_HAS_SYSCALL
	return __do_syscall6(__NR_io_uring_enter, ld->ring_fd, to_submit,
			     min_complete, flags, NULL, 0);
#else
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
		       min_complete, flags, NULL, 0);
#endif
}
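/*
 * Sketch of how the wrapper above is used by the rest of this file: submit,
 * wait, and SQPOLL wakeup are all the same syscall with different arguments
 * (the actual call sites are in fio_ioring_commit() and
 * fio_ioring_getevents()).
 */
#if 0
	/* submit 'nr' new SQEs and reap whatever has already completed */
	io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);

	/* submit nothing, wait for at least 'actual_min' completions */
	io_uring_enter(ld, 0, actual_min, IORING_ENTER_GETEVENTS);

	/* kick an idle SQPOLL kernel thread so it resumes submission */
	io_uring_enter(ld, ld->queued, 0, IORING_ENTER_SQ_WAKEUP);
#endif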
static int fio_ioring_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct io_uring_sqe *sqe;

	sqe = &ld->sqes[io_u->index];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	}

	if (io_u->ddir == DDIR_READ || io_u->ddir == DDIR_WRITE) {
		if (o->fixedbufs) {
			sqe->opcode = fixed_ddir_to_op[io_u->ddir];
			sqe->addr = (unsigned long) io_u->xfer_buf;
			sqe->len = io_u->xfer_buflen;
			sqe->buf_index = io_u->index;
		} else {
			struct iovec *iov = &ld->iovecs[io_u->index];

			/*
			 * Update based on actual io_u, a requeue could have
			 * adjusted these
			 */
			iov->iov_base = io_u->xfer_buf;
			iov->iov_len = io_u->xfer_buflen;

			sqe->opcode = ddir_to_op[io_u->ddir][!!o->nonvectored];
			if (o->nonvectored) {
				sqe->addr = (unsigned long) iov->iov_base;
				sqe->len = iov->iov_len;
			} else {
				sqe->addr = (unsigned long) iov;
			}
		}
		if (!td->o.odirect && o->uncached)
			sqe->rw_flags |= RWF_UNCACHED;
		if (o->nowait)
			sqe->rw_flags |= RWF_NOWAIT;

		/*
		 * Since io_uring can have a submission context (sqthread_poll)
		 * that is different from the process context, we cannot rely on
		 * the IO priority set by ioprio_set() (option prio/prioclass)
		 * to be inherited.
		 * td->ioprio will have the value of the "default prio", so set
		 * this unconditionally. This value might get overridden by
		 * fio_ioring_cmdprio_prep() if the option cmdprio_percentage or
		 * cmdprio_bssplit is used.
		 */
		sqe->ioprio = td->ioprio;
		sqe->off = io_u->offset;
	} else if (ddir_sync(io_u->ddir)) {
		if (io_u->ddir == DDIR_SYNC_FILE_RANGE) {
			sqe->off = f->first_write;
			sqe->len = f->last_write - f->first_write;
			sqe->sync_range_flags = td->o.sync_file_range;
			sqe->opcode = IORING_OP_SYNC_FILE_RANGE;
		} else {
			if (io_u->ddir == DDIR_DATASYNC)
				sqe->fsync_flags |= IORING_FSYNC_DATASYNC;
			sqe->opcode = IORING_OP_FSYNC;
		}
	}

	if (o->force_async && ++ld->prepped == o->force_async) {
		sqe->flags |= IOSQE_ASYNC;
	}

	sqe->user_data = (unsigned long) io_u;
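/*
 * Note, added for clarity: the io_u pointer stored in sqe->user_data above
 * travels through the kernel untouched and is recovered on the completion
 * side with the inverse cast (see fio_ioring_event() and
 * fio_ioring_cmd_event() below):
 */
#if 0
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;
#endif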
static int fio_ioring_cmd_prep(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct fio_file *f = io_u->file;
	struct nvme_uring_cmd *cmd;
	struct io_uring_sqe *sqe;

	/* only supports nvme_uring_cmd */
	if (o->cmd_type != FIO_URING_CMD_NVME)
		return -EINVAL;

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM)
		return 0;

	sqe = &ld->sqes[(io_u->index) << 1];

	if (o->registerfiles) {
		sqe->fd = f->engine_pos;
		sqe->flags = IOSQE_FIXED_FILE;
	}

	if (!td->o.odirect && o->uncached)
		sqe->rw_flags |= RWF_UNCACHED;
	if (o->nowait)
		sqe->rw_flags |= RWF_NOWAIT;

	sqe->opcode = IORING_OP_URING_CMD;
	sqe->user_data = (unsigned long) io_u;
	if (o->nonvectored)
		sqe->cmd_op = NVME_URING_CMD_IO;
	else
		sqe->cmd_op = NVME_URING_CMD_IO_VEC;
	if (o->force_async && ++ld->prepped == o->force_async) {
		sqe->flags |= IOSQE_ASYNC;
	}
	if (o->fixedbufs) {
		sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
		sqe->buf_index = io_u->index;
	}

	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	return fio_nvme_uring_cmd_prep(cmd, io_u,
			o->nonvectored ? NULL : &ld->iovecs[io_u->index],
			&ld->dsm[io_u->index]);
}
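/*
 * Layout note (an assumption spelled out, based on the index shifts used
 * above and in fio_ioring_cmd_post_init()): with IORING_SETUP_SQE128 every
 * submission entry is 128 bytes, i.e. two regular struct io_uring_sqe slots,
 * so the SQE for io_u number i lives at array index i << 1 and the NVMe
 * passthrough command is built in the sqe->cmd area of that big SQE.
 */
#if 0
	sqe = &ld->sqes[io_u->index << 1];		/* 128-byte SQE slot */
	cmd = (struct nvme_uring_cmd *)sqe->cmd;	/* passthrough payload */
#endif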
static struct io_u *fio_ioring_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_uring_cqe *cqe;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	/*
	 * A negative cqe->res converts to a huge unsigned value in this
	 * comparison, so kernel errors land in the first branch and short
	 * transfers become a residual.
	 */
	if (cqe->res != io_u->xfer_buflen) {
		if (cqe->res > io_u->xfer_buflen)
			io_u->error = -cqe->res;
		else
			io_u->resid = io_u->xfer_buflen - cqe->res;
	}
static struct io_u *fio_ioring_cmd_event(struct thread_data *td, int event)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_cqe *cqe;

	index = (event + ld->cq_ring_off) & ld->cq_ring_mask;
	if (o->cmd_type == FIO_URING_CMD_NVME)
		index <<= 1;

	cqe = &ld->cq_ring.cqes[index];
	io_u = (struct io_u *) (uintptr_t) cqe->user_data;

	io_u->error = -cqe->res;
static int fio_ioring_cqring_reap(struct thread_data *td, unsigned int events,
				  unsigned int max)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_cq_ring *ring = &ld->cq_ring;
	unsigned head, reaped = 0;

	do {
		if (head == atomic_load_acquire(ring->tail))
			break;
	} while (reaped + events < max);

	atomic_store_release(ring->head, head);
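/*
 * Sketch of the single-producer/single-consumer protocol used on the
 * completion ring above (and, mirrored, on the submission ring in
 * fio_ioring_queue() below). Entries are addressed by masking a free-running
 * index with ring_entries - 1, which is why fio_ioring_init() rounds the
 * ring size up to a power of two.
 */
#if 0
	head = *ring->head;				/* consumer-private copy */
	while (head != atomic_load_acquire(ring->tail)) {
		cqe = &ring->cqes[head & ld->cq_ring_mask];
		/* ... hand the completion back to fio ... */
		head++;
	}
	atomic_store_release(ring->head, head);		/* publish progress to the kernel */
#endif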
static int fio_ioring_getevents(struct thread_data *td, unsigned int min,
				unsigned int max, const struct timespec *t)
{
	struct ioring_data *ld = td->io_ops_data;
	unsigned actual_min = td->o.iodepth_batch_complete_min == 0 ? 0 : min;
	struct ioring_options *o = td->eo;
	struct io_cq_ring *ring = &ld->cq_ring;

	ld->cq_ring_off = *ring->head;
	do {
		r = fio_ioring_cqring_reap(td, events, max);

		if (!o->sqpoll_thread) {
			r = io_uring_enter(ld, 0, actual_min,
					   IORING_ENTER_GETEVENTS);
			if (errno == EAGAIN || errno == EINTR)
				continue;
			td_verror(td, errno, "io_uring_enter");
		}
	} while (events < min);

	return r < 0 ? r : events;
static inline void fio_ioring_cmdprio_prep(struct thread_data *td,
					   struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct cmdprio *cmdprio = &ld->cmdprio;

	if (fio_cmdprio_set_ioprio(td, cmdprio, io_u))
		ld->sqes[io_u->index].ioprio = io_u->ioprio;
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM && td->io_ops->flags & FIO_ASYNCIO_SYNC_TRIM) {
		do_io_u_trim(td, io_u);

		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	next_tail = tail + 1;
	if (next_tail == atomic_load_acquire(ring->head))
		return FIO_Q_BUSY;

	if (ld->cmdprio.mode != CMDPRIO_MODE_NONE)
		fio_ioring_cmdprio_prep(td, io_u);

	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	atomic_store_release(ring->tail, next_tail);
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}

	/*
	 * only used for iolog
	 */
	if (td->o.read_iolog_file)
		memcpy(&td->last_issue, &now, sizeof(now));
static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	/*
	 * Kernel side does submission. Just need to check if the ring is
	 * flagged as needing a kick; if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;
		unsigned start = *ld->sq_ring.head;

		flags = atomic_load_acquire(ring->flags);
		if (flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		fio_ioring_queued(td, start, ld->queued);
		io_u_mark_submit(td, ld->queued);
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);
		} else if (!ret) {
			io_u_mark_submit(td, ret);
		} else {
			if (errno == EAGAIN || errno == EINTR) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
			}
			/* Shouldn't happen */
			td_verror(td, errno, "io_uring_enter submit");
		}
	} while (ld->queued);
static void fio_ioring_unmap(struct ioring_data *ld)
{
	for (i = 0; i < FIO_ARRAY_SIZE(ld->mmap); i++)
		munmap(ld->mmap[i].ptr, ld->mmap[i].len);
static void fio_ioring_cleanup(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	if (!(td->flags & TD_F_CHILD))
		fio_ioring_unmap(ld);

	fio_cmdprio_cleanup(&ld->cmdprio);
	free(ld->io_u_index);
static int fio_ioring_mmap(struct ioring_data *ld, struct io_uring_params *p)
{
	struct io_sq_ring *sring = &ld->sq_ring;
	struct io_cq_ring *cring = &ld->cq_ring;

	ld->mmap[0].len = p->sq_off.array + p->sq_entries * sizeof(__u32);
	ptr = mmap(0, ld->mmap[0].len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
		   IORING_OFF_SQ_RING);
	ld->mmap[0].ptr = ptr;
	sring->head = ptr + p->sq_off.head;
	sring->tail = ptr + p->sq_off.tail;
	sring->ring_mask = ptr + p->sq_off.ring_mask;
	sring->ring_entries = ptr + p->sq_off.ring_entries;
	sring->flags = ptr + p->sq_off.flags;
	sring->array = ptr + p->sq_off.array;
	ld->sq_ring_mask = *sring->ring_mask;

	if (p->flags & IORING_SETUP_SQE128)
		ld->mmap[1].len = 2 * p->sq_entries * sizeof(struct io_uring_sqe);
	else
		ld->mmap[1].len = p->sq_entries * sizeof(struct io_uring_sqe);
	ld->sqes = mmap(0, ld->mmap[1].len, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ld->ring_fd,
			IORING_OFF_SQES);
	ld->mmap[1].ptr = ld->sqes;

	if (p->flags & IORING_SETUP_CQE32) {
		ld->mmap[2].len = p->cq_off.cqes +
				  2 * p->cq_entries * sizeof(struct io_uring_cqe);
	} else {
		ld->mmap[2].len = p->cq_off.cqes +
				  p->cq_entries * sizeof(struct io_uring_cqe);
	}
	ptr = mmap(0, ld->mmap[2].len, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_POPULATE, ld->ring_fd,
		   IORING_OFF_CQ_RING);
	ld->mmap[2].ptr = ptr;
	cring->head = ptr + p->cq_off.head;
	cring->tail = ptr + p->cq_off.tail;
	cring->ring_mask = ptr + p->cq_off.ring_mask;
	cring->ring_entries = ptr + p->cq_off.ring_entries;
	cring->cqes = ptr + p->cq_off.cqes;
	ld->cq_ring_mask = *cring->ring_mask;
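/*
 * Mapping layout, spelled out for clarity (this follows the standard
 * io_uring ABI; the IORING_OFF_* constants come from the kernel's
 * io_uring.h):
 *
 *   mmap[0]: SQ ring header plus the index array, at IORING_OFF_SQ_RING
 *   mmap[1]: the SQE array itself,                at IORING_OFF_SQES
 *   mmap[2]: CQ ring header plus the CQE array,   at IORING_OFF_CQ_RING
 *
 * With IORING_SETUP_SQE128 / IORING_SETUP_CQE32 (used for NVMe passthrough)
 * the SQE and CQE entries double in size, which is why the length
 * calculations above carry a factor of two in those cases.
 */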
static void fio_ioring_probe(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	struct io_uring_probe *p;

	/* already set by user, don't touch */
	if (o->nonvectored != -1)
		return;

	/* default to off, as that's always safe */
	o->nonvectored = 0;

	p = calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
		      IORING_REGISTER_PROBE, p, 256);

	if (IORING_OP_WRITE > p->ops_len)
		return;

	if ((p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED) &&
	    (p->ops[IORING_OP_WRITE].flags & IO_URING_OP_SUPPORTED))
		o->nonvectored = 1;
static int fio_ioring_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall if
		 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time.
		 */
		td->o.disable_slat = 1;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size; we don't need more entries.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Set up COOP_TASKRUN, as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

	ret = syscall(__NR_io_uring_setup, depth, &p);

	if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
		p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
		p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
	}
	if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
		p.flags &= ~IORING_SETUP_COOP_TASKRUN;
	}
	if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
		p.flags &= ~IORING_SETUP_CQSIZE;
	}

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
			      IORING_REGISTER_BUFFERS, ld->iovecs, depth);
	}

	return fio_ioring_mmap(ld, &p);
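/*
 * Usage sketch (not part of this file): a job exercising the setup paths
 * above could look like the following. hipri requests polled completions
 * (IORING_SETUP_IOPOLL), sqthread_poll requests an SQPOLL kernel thread, and
 * fixedbufs triggers the IORING_REGISTER_BUFFERS call above.
 *
 *	[uring-randread]
 *	ioengine=io_uring
 *	rw=randread
 *	bs=4k
 *	iodepth=32
 *	fixedbufs=1
 *	registerfiles=1
 *	sqthread_poll=1
 */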
static int fio_ioring_cmd_queue_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int depth = td->o.iodepth;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));

	if (o->hipri)
		p.flags |= IORING_SETUP_IOPOLL;
	if (o->sqpoll_thread) {
		p.flags |= IORING_SETUP_SQPOLL;
		if (o->sqpoll_set) {
			p.flags |= IORING_SETUP_SQ_AFF;
			p.sq_thread_cpu = o->sqpoll_cpu;
		}

		/*
		 * Submission latency for sqpoll_thread is just the time it
		 * takes to fill in the SQ ring entries, plus any syscall if
		 * IORING_SQ_NEED_WAKEUP is set; we don't need to log that time.
		 */
		td->o.disable_slat = 1;
	}
	if (o->cmd_type == FIO_URING_CMD_NVME) {
		p.flags |= IORING_SETUP_SQE128;
		p.flags |= IORING_SETUP_CQE32;
	}

	/*
	 * Clamp CQ ring size at our SQ ring size; we don't need more entries.
	 */
	p.flags |= IORING_SETUP_CQSIZE;
	p.cq_entries = depth;

	/*
	 * Set up COOP_TASKRUN, as we don't need to get IPI interrupted for
	 * completing IO operations.
	 */
	p.flags |= IORING_SETUP_COOP_TASKRUN;

	/*
	 * io_uring is always a single issuer, and we can defer task_work
	 * runs until we reap events.
	 */
	p.flags |= IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;

	ret = syscall(__NR_io_uring_setup, depth, &p);

	if (errno == EINVAL && p.flags & IORING_SETUP_DEFER_TASKRUN) {
		p.flags &= ~IORING_SETUP_DEFER_TASKRUN;
		p.flags &= ~IORING_SETUP_SINGLE_ISSUER;
	}
	if (errno == EINVAL && p.flags & IORING_SETUP_COOP_TASKRUN) {
		p.flags &= ~IORING_SETUP_COOP_TASKRUN;
	}
	if (errno == EINVAL && p.flags & IORING_SETUP_CQSIZE) {
		p.flags &= ~IORING_SETUP_CQSIZE;
	}

	fio_ioring_probe(td);

	if (o->fixedbufs) {
		ret = syscall(__NR_io_uring_register, ld->ring_fd,
			      IORING_REGISTER_BUFFERS, ld->iovecs, depth);
	}

	return fio_ioring_mmap(ld, &p);
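/*
 * Usage sketch (not part of this file): the passthrough variant set up above
 * drives the NVMe generic character device rather than the block device, for
 * example (the device path is illustrative):
 *
 *	[uring-cmd-randwrite]
 *	ioengine=io_uring_cmd
 *	cmd_type=nvme
 *	filename=/dev/ng0n1
 *	rw=randwrite
 *	bs=4k
 *	iodepth=32
 */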
static int fio_ioring_register_files(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->fds = calloc(td->o.nr_files, sizeof(int));

	for_each_file(td, f, i) {
		ret = generic_open_file(td, f);
	}

	ret = syscall(__NR_io_uring_register, ld->ring_fd,
		      IORING_REGISTER_FILES, ld->fds, td->o.nr_files);

	/*
	 * Pretend the file is closed again, and really close it if we hit
	 * an error.
	 */
	for_each_file(td, f, i) {
		ret2 = generic_close_file(td, f);
	}
static int fio_ioring_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_queue_init(td);
	if (err) {
		int init_err = errno;

		if (init_err == ENOSYS)
			log_err("fio: your kernel doesn't support io_uring\n");
		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		sqe = &ld->sqes[i];
		memset(sqe, 0, sizeof(*sqe));
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}
static int fio_ioring_cmd_post_init(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	for (i = 0; i < td->o.iodepth; i++) {
		struct iovec *iov = &ld->iovecs[i];

		io_u = ld->io_u_index[i];
		iov->iov_base = io_u->buf;
		iov->iov_len = td_max_bs(td);
	}

	err = fio_ioring_cmd_queue_init(td);
	if (err) {
		int init_err = errno;

		td_verror(td, init_err, "io_queue_init");
		return 1;
	}

	for (i = 0; i < td->o.iodepth; i++) {
		struct io_uring_sqe *sqe;

		if (o->cmd_type == FIO_URING_CMD_NVME) {
			sqe = &ld->sqes[i << 1];
			memset(sqe, 0, 2 * sizeof(*sqe));
		} else {
			sqe = &ld->sqes[i];
			memset(sqe, 0, sizeof(*sqe));
		}
	}

	if (o->registerfiles) {
		err = fio_ioring_register_files(td);
		if (err) {
			td_verror(td, errno, "ioring_register_files");
			return 1;
		}
	}
static int fio_ioring_init(struct thread_data *td)
{
	struct ioring_options *o = td->eo;
	struct ioring_data *ld;

	/* sqthread submission requires registered files */
	if (o->sqpoll_thread)
		o->registerfiles = 1;

	if (o->registerfiles && td->o.nr_files != td->o.open_files) {
		log_err("fio: io_uring registered files require nr_files to "
			"be identical to open_files\n");
		return 1;
	}

	ld = calloc(1, sizeof(*ld));

	/* ring depth must be a power-of-2 */
	ld->iodepth = td->o.iodepth;
	td->o.iodepth = roundup_pow2(td->o.iodepth);

	ld->io_u_index = calloc(td->o.iodepth, sizeof(struct io_u *));
	ld->iovecs = calloc(td->o.iodepth, sizeof(struct iovec));

	td->io_ops_data = ld;

	ret = fio_cmdprio_init(td, &ld->cmdprio, &o->cmdprio_options);
	if (ret) {
		td_verror(td, EINVAL, "fio_ioring_init");
		return 1;
	}

	/*
	 * For io_uring_cmd, trims are async operations unless we are operating
	 * in zbd mode where trim means zone reset.
	 */
	if (!strcmp(td->io_ops->name, "io_uring_cmd") && td_trim(td) &&
	    td->o.zone_mode == ZONE_MODE_ZBD)
		td->io_ops->flags |= FIO_ASYNCIO_SYNC_TRIM;

	ld->dsm = calloc(ld->iodepth, sizeof(*ld->dsm));
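/*
 * Note on the depth handling above: ld->iodepth keeps the user-visible queue
 * depth while td->o.iodepth is rounded up so the kernel ring size is a power
 * of two and the head/tail mask arithmetic in the ring helpers works. For
 * example, iodepth=24 results in 32-entry SQ/CQ rings, but fio_ioring_queue()
 * still caps the number of in-flight I/Os at 24 via ld->iodepth.
 */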
static int fio_ioring_io_u_init(struct thread_data *td, struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;

	ld->io_u_index[io_u->index] = io_u;
static int fio_ioring_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
static int fio_ioring_cmd_open_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;

		/* Store the namespace-id and lba size. */
		data = FILE_ENG_DATA(f);
		ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);

		data = calloc(1, sizeof(struct nvme_data));
		if (ms)
			data->lba_ext = lba_size + ms;
		else
			data->lba_shift = ilog2(lba_size);

		FILE_SET_ENG_DATA(f, data);

		assert(data->lba_shift < 32);
		lba_size = data->lba_ext ? data->lba_ext : (1U << data->lba_shift);

		for_each_rw_ddir(ddir) {
			if (td->o.min_bs[ddir] % lba_size ||
			    td->o.max_bs[ddir] % lba_size) {
				if (data->lba_ext)
					log_err("block size must be a multiple of "
						"(LBA data size + Metadata size)\n");
				else
					log_err("block size must be a multiple of LBA data size\n");
			}
		}
	}

	if (!ld || !o->registerfiles)
		return generic_open_file(td, f);

	f->fd = ld->fds[f->engine_pos];
static int fio_ioring_close_file(struct thread_data *td, struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);
static int fio_ioring_cmd_close_file(struct thread_data *td,
				     struct fio_file *f)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = FILE_ENG_DATA(f);

		FILE_SET_ENG_DATA(f, NULL);
	}

	if (!ld || !o->registerfiles)
		return generic_close_file(td, f);
static int fio_ioring_cmd_get_file_size(struct thread_data *td,
					struct fio_file *f)
{
	struct ioring_options *o = td->eo;

	if (fio_file_size_known(f))
		return 0;

	if (o->cmd_type == FIO_URING_CMD_NVME) {
		struct nvme_data *data = NULL;
		unsigned int nsid, lba_size = 0;

		ret = fio_nvme_get_info(f, &nsid, &lba_size, &ms, &nlba);

		data = calloc(1, sizeof(struct nvme_data));
		if (ms)
			data->lba_ext = lba_size + ms;
		else
			data->lba_shift = ilog2(lba_size);

		f->real_file_size = lba_size * nlba;
		fio_file_set_size_known(f);

		FILE_SET_ENG_DATA(f, data);
	}

	return generic_get_file_size(td, f);
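/*
 * Worked example for the sizing math above (values illustrative): a
 * namespace formatted with 4096-byte LBAs and no metadata gives
 * lba_shift = 12 and real_file_size = 4096 * nlba. With 8 bytes of metadata
 * per block, lba_ext = 4104, and fio_ioring_cmd_open_file() then requires
 * every block size used by the job to be a multiple of 4104 rather than
 * 4096.
 */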
static int fio_ioring_cmd_get_zoned_model(struct thread_data *td,
					  struct fio_file *f,
					  enum zbd_zoned_model *model)
{
	return fio_nvme_get_zoned_model(td, f, model);
}

static int fio_ioring_cmd_report_zones(struct thread_data *td,
				       struct fio_file *f, uint64_t offset,
				       struct zbd_zone *zbdz,
				       unsigned int nr_zones)
{
	return fio_nvme_report_zones(td, f, offset, zbdz, nr_zones);
}

static int fio_ioring_cmd_reset_wp(struct thread_data *td, struct fio_file *f,
				   uint64_t offset, uint64_t length)
{
	return fio_nvme_reset_wp(td, f, offset, length);
}

static int fio_ioring_cmd_get_max_open_zones(struct thread_data *td,
					     struct fio_file *f,
					     unsigned int *max_open_zones)
{
	return fio_nvme_get_max_open_zones(td, f, max_open_zones);
}
static int fio_ioring_cmd_fetch_ruhs(struct thread_data *td, struct fio_file *f,
				     struct fio_ruhs_info *fruhs_info)
{
	struct nvme_fdp_ruh_status *ruhs;

	bytes = sizeof(*ruhs) + FDP_MAX_RUHS * sizeof(struct nvme_fdp_ruh_status_desc);
	ruhs = scalloc(1, bytes);

	ret = fio_nvme_iomgmt_ruhs(td, f, ruhs, bytes);

	fruhs_info->nr_ruhs = le16_to_cpu(ruhs->nruhsd);
	for (i = 0; i < fruhs_info->nr_ruhs; i++)
		fruhs_info->plis[i] = le16_to_cpu(ruhs->ruhss[i].pid);
static struct ioengine_ops ioengine_uring = {
	.name = "io_uring",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_ASYNCIO_SYNC_TRIM | FIO_NO_OFFLOAD |
		 FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_open_file,
	.close_file = fio_ioring_close_file,
	.get_file_size = generic_get_file_size,
	.option_struct_size = sizeof(struct ioring_options),
};
static struct ioengine_ops ioengine_uring_cmd = {
	.name = "io_uring_cmd",
	.version = FIO_IOOPS_VERSION,
	.flags = FIO_NO_OFFLOAD | FIO_MEMALIGN | FIO_RAWIO |
		 FIO_ASYNCIO_SETS_ISSUE_TIME,
	.init = fio_ioring_init,
	.post_init = fio_ioring_cmd_post_init,
	.io_u_init = fio_ioring_io_u_init,
	.prep = fio_ioring_cmd_prep,
	.queue = fio_ioring_queue,
	.commit = fio_ioring_commit,
	.getevents = fio_ioring_getevents,
	.event = fio_ioring_cmd_event,
	.cleanup = fio_ioring_cleanup,
	.open_file = fio_ioring_cmd_open_file,
	.close_file = fio_ioring_cmd_close_file,
	.get_file_size = fio_ioring_cmd_get_file_size,
	.get_zoned_model = fio_ioring_cmd_get_zoned_model,
	.report_zones = fio_ioring_cmd_report_zones,
	.reset_wp = fio_ioring_cmd_reset_wp,
	.get_max_open_zones = fio_ioring_cmd_get_max_open_zones,
	.option_struct_size = sizeof(struct ioring_options),
	.fdp_fetch_ruhs = fio_ioring_cmd_fetch_ruhs,
};
static void fio_init fio_ioring_register(void)
{
	register_ioengine(&ioengine_uring);
	register_ioengine(&ioengine_uring_cmd);
}

static void fio_exit fio_ioring_unregister(void)
{
	unregister_ioengine(&ioengine_uring);
	unregister_ioengine(&ioengine_uring_cmd);
}
#endif /* ARCH_HAVE_IOURING */